hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---
8589e395abbc12c34c140653d4b8b7a4680977f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ================================================================================================
// Tim Backus
// CIS 450 - High Performance Computing
// 3D Game of Life - CUDA Version
// ================================================================================================
#define GOL_IO_FILENAME "gol3DOutput.dat"
#define GOL_CUDA_THREADS_SIZE 8
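// Threads per block along each axis; every block therefore launches 8 * 8 * 8 = 512 threads.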
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <stdlib.h>
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather & Map) - Adds up the number of neighbors for a cell in a 3x3x3 cube and
// sets each cell to alive or dead depending on its number of neighbors and the rules for this
// current game.
// ------------------------------------------------------------------------------------------------
__global__
void lifeItrKernel(const char* const d_in, char* d_out, const unsigned int xsize,
const unsigned int ysize, const unsigned int zsize, const unsigned int alow,
const unsigned int ahigh) {
extern __shared__ char shMem[];
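// Dynamically sized shared-memory buffer holding one cell per thread in the block;
// its size (blockDim.x * blockDim.y * blockDim.z bytes) is supplied at kernel launch.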
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int stepX = ysize * zsize;
const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
const unsigned int threadID = threadIdx.x * blockDim.y * blockDim.z +
threadIdx.y * blockDim.z + threadIdx.z;
// Ensure thread bounds
if(threadPosX >= xsize) return;
if(threadPosY >= ysize) return;
if(threadPosZ >= zsize) return;
// Copy global into shared memory
shMem[threadID] = d_in[arrayPos];
__syncthreads();
// Begin adding neighbors
char sum = 0;
// X-Axis neighbors
int xc, xcoord;
for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
// Wrap X-Axis
xcoord = xc;
if(xc < 0) xcoord = xsize - 1;
else if(xc >= xsize) xcoord = 0;
// Y-Axis neighbors
int yc, ycoord;
for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
// Wrap Y-Axis
ycoord = yc;
if(yc < 0) ycoord = ysize - 1;
else if(yc >= ysize) ycoord = 0;
// Z-Axis neighbors
int zc, zcoord;
for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
// Wrap Z-Axis
zcoord = zc;
if(zc < 0) zcoord = zsize - 1;
else if(zc >= zsize) zcoord = 0;
// Don't count the cell itself
if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
// Use shared memory instead of global memory if the current coord is in the thread block
if((xcoord >= blockDim.x * blockIdx.x && xcoord < blockDim.x * (blockIdx.x + 1)) &&
(ycoord >= blockDim.y * blockIdx.y && ycoord < blockDim.y * (blockIdx.y + 1)) &&
(zcoord >= blockDim.z * blockIdx.z && zcoord < blockDim.z * (blockIdx.z + 1))) {
sum += shMem[(xcoord % blockDim.x) * blockDim.y * blockDim.z + (ycoord % blockDim.y) *
blockDim.z + (zcoord % blockDim.z)];
} else {
sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
}
}
}
}
}
// Set the cell's dead or alive status based on its neighbor count
if (sum < alow || sum > ahigh) {
d_out[arrayPos] = 0;
} else if (sum >= alow && sum <= ahigh) {
d_out[arrayPos] = 1;
}
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather) - Adds up the number of neighbors for a cell in a 3x3x3 cube.
// ------------------------------------------------------------------------------------------------
__global__
void sumNeighborsKernel(const char* const d_in, char* d_out, const unsigned int xsize,
const unsigned int ysize, const unsigned int zsize) {
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int stepX = ysize * zsize;
const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
// printf("TID=%d,%d,%d\n", threadIdx.x, threadIdx.y, threadIdx.z);
// printf("TPOS=%d,%d,%d\n", threadPosX, threadPosY, threadPosZ);
// printf("APOS=%d\n", arrayPos);
// Ensure thread bounds
if(threadPosX > xsize - 1) return;
if(threadPosY > ysize - 1) return;
if(threadPosZ > zsize - 1) return;
char sum = 0;
// X-Axis neighbors
int xc, xcoord;
for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
// Wrap X-Axis
xcoord = xc;
if(xc < 0) xcoord = xsize - 1;
else if(xc >= xsize) xcoord = 0;
// Y-Axis neighbors
int yc, ycoord;
for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
// Wrap Y-Axis
ycoord = yc;
if(yc < 0) ycoord = ysize - 1;
else if(yc >= ysize) ycoord = 0;
// Z-Axis neighbors
int zc, zcoord;
for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
// Wrap Z-Axis
zcoord = zc;
if(zc < 0) zcoord = zsize - 1;
else if(zc >= zsize) zcoord = 0;
// Don't count the cell itself
if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
}
}
}
}
d_out[arrayPos] = sum;
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Map) - Sets each cell to alive or dead depending on its number of neighbors and
// the rules for this current game.
// ------------------------------------------------------------------------------------------------
__global__
void setAliveDeadKernel(const char* const d_nei, char* d_out, const unsigned int xs,
const unsigned int ys, const unsigned int zs, const unsigned int alow,
const unsigned int ahigh) {
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const int stepX = ys * zs;
const int arrayPos = threadPosX * stepX + threadPosY * zs + threadPosZ;
// Ensure thread bounds
if(threadPosX > xs - 1) return;
if(threadPosY > ys - 1) return;
if(threadPosZ > zs - 1) return;
// Set the cell alive or dead according to the rules
if (d_nei[arrayPos] < alow || d_nei[arrayPos] > ahigh) {
d_out[arrayPos] = 0;
} else if (d_nei[arrayPos] >= alow && d_nei[arrayPos] <= ahigh) {
d_out[arrayPos] = 1;
}
}
// ------------------------------------------------------------------------------------------------
// Returns the 1D position of a simulated 3D array
// ------------------------------------------------------------------------------------------------
int getArrIndex(const unsigned int xp, const unsigned int yp, const unsigned int zp,
const unsigned int ys, const unsigned int zs) {
return xp * ys * zs + yp * zs + zp;
}
// ------------------------------------------------------------------------------------------------
// Prints a 3D array.
// ------------------------------------------------------------------------------------------------
void print3DArray(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
int i;
for(i = 0; i < x; ++i) {
printf("Dimension %d:\n", i);
int j;
for(j = 0; j < y; ++j) {
int k;
for(k = 0; k < z; ++k) {
printf("%d ", (char)arr[getArrIndex(i, j, k, y, z)]);
}
printf("\n");
}
printf("\n");
}
}
// ------------------------------------------------------------------------------------------------
// Randomly sets each cell to alive or dead.
// ------------------------------------------------------------------------------------------------
void randomizeGrid(char* grid, unsigned const int size, unsigned const int chance) {
srand(time(NULL));
int i;
for(i = 0; i < size; i++) {
grid[i] = (char)((rand() % 100 <= chance) ? 1 : 0);
}
}
// ------------------------------------------------------------------------------------------------
// Initializes the game data file.
// Line 1: <iteration count> <x-size> <y-size> <z-size>
// Line 2: Blank
// ------------------------------------------------------------------------------------------------
void initGameFile(const unsigned int itrs, const unsigned int x, const unsigned int y,
const unsigned int z) {
FILE *fp;
fp = fopen(GOL_IO_FILENAME, "w+");
fprintf(fp, "%d %d %d %d\n\n", itrs, x, y, z);
fclose(fp);
}
// ------------------------------------------------------------------------------------------------
// Writes a game to a file for visualization within Java.
// For every iteration, a block of text is created of the format:
// "<x-coord>:<z-coords for y=0>, <z-coords for y=1>, ..."
// Z-coords are represented by a 0 or 1 for each z-coordinate
// Example: Game with 5 iterations, x=3, y=7, z=4
// 5 3 7 4
//
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
//
// ------------------------------------------------------------------------------------------------
void writeGameStep(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
FILE *fp;
fp = fopen(GOL_IO_FILENAME, "a");
int i;
for(i = 0; i < x; i++) {
fprintf(fp, "%d:", i);
int j;
for(j = 0; j < y; j++) {
if(j > 0) {
fprintf(fp, ",");
}
// Print Z-Dim values
int k;
for(k = 0; k < z; k++) {
fprintf(fp, "%d", arr[getArrIndex(i, j, k, y, z)]);
}
}
fprintf(fp, "\n");
}
fclose(fp);
}
// ------------------------------------------------------------------------------------------------
// Runs the Game of Life.
// ------------------------------------------------------------------------------------------------
void runLife(const unsigned int iterations, unsigned int xsize, const unsigned int ysize,
const unsigned int zsize, const unsigned int initc, const unsigned int alow,
const unsigned int ahigh, const unsigned int printArr, const unsigned int writeOut) {
// Memory values
const unsigned int arrSize = xsize * ysize * zsize;
const unsigned int arrMem = arrSize * sizeof(char);
// GPU grid dimensions
const int gx = ceil((double) xsize / GOL_CUDA_THREADS_SIZE);
const int gy = ceil((double) ysize / GOL_CUDA_THREADS_SIZE);
const int gz = ceil((double) zsize / GOL_CUDA_THREADS_SIZE);
printf("Grid dimension: %d,%d,%d\n", gx, gy, gz);
dim3 gridDim(gx, gy, gz);
// GPU thread dimensions
const int tx = GOL_CUDA_THREADS_SIZE;
const int ty = GOL_CUDA_THREADS_SIZE;
const int tz = GOL_CUDA_THREADS_SIZE;
printf("Block dimension: %d,%d,%d\n", tx, ty, tz);
dim3 blockDim(tx, ty, tz);
// Initialize game space
char *h_in = (char *) malloc(arrMem);
printf("Randomizing initial game (could take a while)...\n");
randomizeGrid(h_in, arrSize, initc);
// Print the initial array if enabled
if(printArr) {
printf("Initial grid:\n");
print3DArray(h_in, xsize, ysize, zsize);
}
// Initialize the output file if enabled
if(writeOut) {
initGameFile(iterations, xsize, ysize, zsize);
writeGameStep(h_in, xsize, ysize, zsize);
}
// Pointers for GPU game data
char *d_in;
char *d_out;
// Allocate input array on GPU
printf("Allocating %d bytes of memory on the GPU...\n",
(int)(xsize * ysize * zsize * sizeof(char)));
hipMalloc(&d_in, arrMem);
// Allocate output array on GPU
hipMalloc(&d_out, arrMem);
// Copy the host data to the GPU
hipMemcpy(d_in, h_in, arrMem, hipMemcpyHostToDevice);
// Do Game of Life iterations
int itrNum;
for(itrNum = 0; itrNum < iterations; itrNum++) {
printf("Iteration %d ", itrNum);
// Run the kernel to simulate an iteration of 3D life
clock_t start = clock();
hipLaunchKernelGGL(( lifeItrKernel), dim3(gridDim), dim3(blockDim), (tx * ty * tz * sizeof(char)), 0, d_in, d_out, xsize, ysize, zsize, alow, ahigh);
hipError_t cerr = hipDeviceSynchronize();
if(cerr != hipSuccess) {
printf("Kernel lifeItr failed with error \"%s\".\n", hipGetErrorString(cerr));
}
clock_t end = clock();
// Copy the memory back to the input array for the next iteration
hipMemcpy(d_in, d_out, arrMem, hipMemcpyDeviceToDevice);
printf("took %d ticks.\n", (end - start));
// Print and write out if enabled
if(printArr || writeOut) {
hipMemcpy(h_in, d_out, arrMem, hipMemcpyDeviceToHost);
if(printArr) {
print3DArray(h_in, xsize, ysize, zsize);
}
if(writeOut) {
writeGameStep(h_in, xsize, ysize, zsize);
}
}
}
// Free memory
hipFree(d_in);
hipFree(d_out);
free(h_in);
}
// ------------------------------------------------------------------------------------------------
// Prints the usage message if the wrong number of runtime arguments is passed.
// ------------------------------------------------------------------------------------------------
void printUsage() {
printf("Arguments (separated by spaces):\n");
printf(" MAX_ITERATIONS\n SIZE_X\n SIZE_Y\n SIZE_Z\n INITIAL_ALIVE_CHANCE (1-100)\n");
printf(" ALIVE_THRESHOLD_LOW (inclusive)\n ALIVE_THRESHOLD_HIGH (inclusive)\n");
printf(" PRINT_ARRAY? (0=no, 1=yes)\n WRITE_TO_FILE? (0=no, 1=yes)\n");
}
// ------------------------------------------------------------------------------------------------
// Main Method
// ------------------------------------------------------------------------------------------------
int main(int argc, char *argv[]) {
// Ensure proper runtime argument count
if(argc != 10) {
printUsage();
return EXIT_SUCCESS;
}
// Parse iteration count
unsigned const int iterations = atoi(argv[1]);
// Parse X-Size
unsigned const int sizeX = atoi(argv[2]);
// Parse Y-Size
unsigned const int sizeY = atoi(argv[3]);
// Parse Z-Size
unsigned const int sizeZ = atoi(argv[4]);
// Parse initial alive chance
unsigned const int initChance = atoi(argv[5]);
// Parse alive low threshold (inclusive)
unsigned const int aliveLow = atoi(argv[6]);
// Parse alive high threshold (inclusive)
unsigned const int aliveHigh = atoi(argv[7]);
// Parse whether or not to print the array
unsigned const int printArray = atoi(argv[8]);
// Parse whether or not to output to disk
unsigned const int writeOut = atoi(argv[9]);
// Print game information to the console
printf("Starting %d iteration Game of Life (CUDA) with sizes x=%d, y=%d, z=%d\n", iterations,
sizeX, sizeY, sizeZ);
printf(" initial alive chance=%d, neighbors for alive=%d to %d\n", initChance,
aliveLow, aliveHigh);
if(writeOut) {
printf(" File output enabled.\n");
}
runLife(iterations, sizeX, sizeY, sizeZ, initChance, aliveLow, aliveHigh, printArray, writeOut);
return EXIT_SUCCESS;
} | 8589e395abbc12c34c140653d4b8b7a4680977f8.cu | // ================================================================================================
// Tim Backus
// CIS 450 - High Performance Computing
// 3D Game of Life - CUDA Version
// ================================================================================================
#define GOL_IO_FILENAME "gol3DOutput.dat"
#define GOL_CUDA_THREADS_SIZE 8
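// Threads per block along each axis; every block therefore launches 8 * 8 * 8 = 512 threads.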
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <stdlib.h>
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather & Map) - Adds up the number of neighbors for a cell in a 3x3x3 cube and
// sets each cell to alive or dead depending on its number of neighbors and the rules for this
// current game.
// ------------------------------------------------------------------------------------------------
__global__
void lifeItrKernel(const char* const d_in, char* d_out, const unsigned int xsize,
const unsigned int ysize, const unsigned int zsize, const unsigned int alow,
const unsigned int ahigh) {
extern __shared__ char shMem[];
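// Dynamically sized shared-memory buffer holding one cell per thread in the block;
// its size (blockDim.x * blockDim.y * blockDim.z bytes) is supplied at kernel launch.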
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int stepX = ysize * zsize;
const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
const unsigned int threadID = threadIdx.x * blockDim.y * blockDim.z +
threadIdx.y * blockDim.z + threadIdx.z;
// Ensure thread bounds
if(threadPosX >= xsize) return;
if(threadPosY >= ysize) return;
if(threadPosZ >= zsize) return;
// Copy global into shared memory
shMem[threadID] = d_in[arrayPos];
__syncthreads();
// Begin adding neighbors
char sum = 0;
// X-Axis neighbors
int xc, xcoord;
for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
// Wrap X-Axis
xcoord = xc;
if(xc < 0) xcoord = xsize - 1;
else if(xc >= xsize) xcoord = 0;
// Y-Axis neighbors
int yc, ycoord;
for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
// Wrap Y-Axis
ycoord = yc;
if(yc < 0) ycoord = ysize - 1;
else if(yc >= ysize) ycoord = 0;
// Z-Axis neighbors
int zc, zcoord;
for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
// Wrap Z-Axis
zcoord = zc;
if(zc < 0) zcoord = zsize - 1;
else if(zc >= zsize) zcoord = 0;
// Don't count the cell itself
if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
// Use shared memory instead of global memory if the current coord is in the thread block
if((xcoord >= blockDim.x * blockIdx.x && xcoord < blockDim.x * (blockIdx.x + 1)) &&
(ycoord >= blockDim.y * blockIdx.y && ycoord < blockDim.y * (blockIdx.y + 1)) &&
(zcoord >= blockDim.z * blockIdx.z && zcoord < blockDim.z * (blockIdx.z + 1))) {
sum += shMem[(xcoord % blockDim.x) * blockDim.y * blockDim.z + (ycoord % blockDim.y) *
blockDim.z + (zcoord % blockDim.z)];
} else {
sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
}
}
}
}
}
// Set the cell's dead or alive status based on its neighbor count
if (sum < alow || sum > ahigh) {
d_out[arrayPos] = 0;
} else if (sum >= alow && sum <= ahigh) {
d_out[arrayPos] = 1;
}
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather) - Adds up the number of neighbors for a cell in a 3x3x3 cube.
// ------------------------------------------------------------------------------------------------
__global__
void sumNeighborsKernel(const char* const d_in, char* d_out, const unsigned int xsize,
const unsigned int ysize, const unsigned int zsize) {
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int stepX = ysize * zsize;
const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
// printf("TID=%d,%d,%d\n", threadIdx.x, threadIdx.y, threadIdx.z);
// printf("TPOS=%d,%d,%d\n", threadPosX, threadPosY, threadPosZ);
// printf("APOS=%d\n", arrayPos);
// Ensure thread bounds
if(threadPosX > xsize - 1) return;
if(threadPosY > ysize - 1) return;
if(threadPosZ > zsize - 1) return;
char sum = 0;
// X-Axis neighbors
int xc, xcoord;
for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
// Wrap X-Axis
xcoord = xc;
if(xc < 0) xcoord = xsize - 1;
else if(xc >= xsize) xcoord = 0;
// Y-Axis neighbors
int yc, ycoord;
for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
// Wrap Y-Axis
ycoord = yc;
if(yc < 0) ycoord = ysize - 1;
else if(yc >= ysize) ycoord = 0;
// Z-Axis neighbors
int zc, zcoord;
for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
// Wrap Z-Axis
zcoord = zc;
if(zc < 0) zcoord = zsize - 1;
else if(zc >= zsize) zcoord = 0;
// Don't count the cell itself
if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
}
}
}
}
d_out[arrayPos] = sum;
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Map) - Sets each cell to alive or dead depending on its number of neighbors and
// the rules for this current game.
// ------------------------------------------------------------------------------------------------
__global__
void setAliveDeadKernel(const char* const d_nei, char* d_out, const unsigned int xs,
const unsigned int ys, const unsigned int zs, const unsigned int alow,
const unsigned int ahigh) {
// Calculate block and thread IDs
const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
const int stepX = ys * zs;
const int arrayPos = threadPosX * stepX + threadPosY * zs + threadPosZ;
// Ensure thread bounds
if(threadPosX > xs - 1) return;
if(threadPosY > ys - 1) return;
if(threadPosZ > zs - 1) return;
// Set the cell alive or dead according to the rules
if (d_nei[arrayPos] < alow || d_nei[arrayPos] > ahigh) {
d_out[arrayPos] = 0;
} else if (d_nei[arrayPos] >= alow && d_nei[arrayPos] <= ahigh) {
d_out[arrayPos] = 1;
}
}
// ------------------------------------------------------------------------------------------------
// Returns the 1D position of a simulated 3D array
// ------------------------------------------------------------------------------------------------
int getArrIndex(const unsigned int xp, const unsigned int yp, const unsigned int zp,
const unsigned int ys, const unsigned int zs) {
return xp * ys * zs + yp * zs + zp;
}
// ------------------------------------------------------------------------------------------------
// Prints a 3D array.
// ------------------------------------------------------------------------------------------------
void print3DArray(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
int i;
for(i = 0; i < x; ++i) {
printf("Dimension %d:\n", i);
int j;
for(j = 0; j < y; ++j) {
int k;
for(k = 0; k < z; ++k) {
printf("%d ", (char)arr[getArrIndex(i, j, k, y, z)]);
}
printf("\n");
}
printf("\n");
}
}
// ------------------------------------------------------------------------------------------------
// Randomly sets each cell to alive or dead.
// ------------------------------------------------------------------------------------------------
void randomizeGrid(char* grid, unsigned const int size, unsigned const int chance) {
srand(time(NULL));
int i;
for(i = 0; i < size; i++) {
grid[i] = (char)((rand() % 100 <= chance) ? 1 : 0);
}
}
// ------------------------------------------------------------------------------------------------
// Initializes the game data file.
// Line 1: <iteration count> <x-size> <y-size> <z-size>
// Line 2: Blank
// ------------------------------------------------------------------------------------------------
void initGameFile(const unsigned int itrs, const unsigned int x, const unsigned int y,
const unsigned int z) {
FILE *fp;
fp = fopen(GOL_IO_FILENAME, "w+");
fprintf(fp, "%d %d %d %d\n\n", itrs, x, y, z);
fclose(fp);
}
// ------------------------------------------------------------------------------------------------
// Writes a game to a file for visualization within Java.
// For every iteration, a block of text is created of the format:
// "<x-coord>:<z-coords for y=0>, <z-coords for y=1>, ..."
// Z-coords are represented by a 0 or 1 for each z-coordinate
// Example: Game with 5 iterations, x=3, y=7, z=4
// 5 3 7 4
//
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
//
// ------------------------------------------------------------------------------------------------
void writeGameStep(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
FILE *fp;
fp = fopen(GOL_IO_FILENAME, "a");
int i;
for(i = 0; i < x; i++) {
fprintf(fp, "%d:", i);
int j;
for(j = 0; j < y; j++) {
if(j > 0) {
fprintf(fp, ",");
}
// Print Z-Dim values
int k;
for(k = 0; k < z; k++) {
fprintf(fp, "%d", arr[getArrIndex(i, j, k, y, z)]);
}
}
fprintf(fp, "\n");
}
fclose(fp);
}
// ------------------------------------------------------------------------------------------------
// Runs the Game of Life.
// ------------------------------------------------------------------------------------------------
void runLife(const unsigned int iterations, unsigned int xsize, const unsigned int ysize,
const unsigned int zsize, const unsigned int initc, const unsigned int alow,
const unsigned int ahigh, const unsigned int printArr, const unsigned int writeOut) {
// Memory values
const unsigned int arrSize = xsize * ysize * zsize;
const unsigned int arrMem = arrSize * sizeof(char);
// GPU grid dimensions
const int gx = ceil((double) xsize / GOL_CUDA_THREADS_SIZE);
const int gy = ceil((double) ysize / GOL_CUDA_THREADS_SIZE);
const int gz = ceil((double) zsize / GOL_CUDA_THREADS_SIZE);
printf("Grid dimension: %d,%d,%d\n", gx, gy, gz);
dim3 gridDim(gx, gy, gz);
// GPU thread dimensions
const int tx = GOL_CUDA_THREADS_SIZE;
const int ty = GOL_CUDA_THREADS_SIZE;
const int tz = GOL_CUDA_THREADS_SIZE;
printf("Block dimension: %d,%d,%d\n", tx, ty, tz);
dim3 blockDim(tx, ty, tz);
// Initialize game space
char *h_in = (char *) malloc(arrMem);
printf("Randomizing initial game (could take a while)...\n");
randomizeGrid(h_in, arrSize, initc);
// Print the initial array if enabled
if(printArr) {
printf("Initial grid:\n");
print3DArray(h_in, xsize, ysize, zsize);
}
// Initialize the output file if enabled
if(writeOut) {
initGameFile(iterations, xsize, ysize, zsize);
writeGameStep(h_in, xsize, ysize, zsize);
}
// Pointers for GPU game data
char *d_in;
char *d_out;
// Allocate input array on GPU
printf("Allocating %d bytes of memory on the GPU...\n",
(int)(xsize * ysize * zsize * sizeof(char)));
cudaMalloc(&d_in, arrMem);
// Allocate output array on GPU
cudaMalloc(&d_out, arrMem);
// Copy the host data to the GPU
cudaMemcpy(d_in, h_in, arrMem, cudaMemcpyHostToDevice);
// Do Game of Life iterations
int itrNum;
for(itrNum = 0; itrNum < iterations; itrNum++) {
printf("Iteration %d ", itrNum);
// Run the kernel to simulate an iteration of 3D life
clock_t start = clock();
lifeItrKernel<<<gridDim, blockDim, (tx * ty * tz * sizeof(char))>>>(d_in, d_out, xsize, ysize, zsize, alow, ahigh);
cudaError_t cerr = cudaDeviceSynchronize();
if(cerr != cudaSuccess) {
printf("Kernel lifeItr failed with error \"%s\".\n", cudaGetErrorString(cerr));
}
clock_t end = clock();
// Copy the memory back to the input array for the next iteration
cudaMemcpy(d_in, d_out, arrMem, cudaMemcpyDeviceToDevice);
printf("took %d ticks.\n", (end - start));
// Print and write out if enabled
if(printArr || writeOut) {
cudaMemcpy(h_in, d_out, arrMem, cudaMemcpyDeviceToHost);
if(printArr) {
print3DArray(h_in, xsize, ysize, zsize);
}
if(writeOut) {
writeGameStep(h_in, xsize, ysize, zsize);
}
}
}
// Free memory
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
}
// ------------------------------------------------------------------------------------------------
// Prints the usage message if the wrong number of runtime arguments is passed.
// ------------------------------------------------------------------------------------------------
void printUsage() {
printf("Arguments (separated by spaces):\n");
printf(" MAX_ITERATIONS\n SIZE_X\n SIZE_Y\n SIZE_Z\n INITIAL_ALIVE_CHANCE (1-100)\n");
printf(" ALIVE_THRESHOLD_LOW (inclusive)\n ALIVE_THRESHOLD_HIGH (inclusive)\n");
printf(" PRINT_ARRAY? (0=no, 1=yes)\n WRITE_TO_FILE? (0=no, 1=yes)\n");
}
// ------------------------------------------------------------------------------------------------
// Main Method
// ------------------------------------------------------------------------------------------------
int main(int argc, char *argv[]) {
// Ensure proper runtime argument count
if(argc != 10) {
printUsage();
return EXIT_SUCCESS;
}
// Parse iteration count
unsigned const int iterations = atoi(argv[1]);
// Parse X-Size
unsigned const int sizeX = atoi(argv[2]);
// Parse Y-Size
unsigned const int sizeY = atoi(argv[3]);
// Parse Z-Size
unsigned const int sizeZ = atoi(argv[4]);
// Parse initial alive chance
unsigned const int initChance = atoi(argv[5]);
// Parse alive low threshold (inclusive)
unsigned const int aliveLow = atoi(argv[6]);
// Parse alive high threshold (inclusive)
unsigned const int aliveHigh = atoi(argv[7]);
// Parse whether or not to print the array
unsigned const int printArray = atoi(argv[8]);
// Parse whether or not to output to disk
unsigned const int writeOut = atoi(argv[9]);
// Print game information to the console
printf("Starting %d iteration Game of Life (CUDA) with sizes x=%d, y=%d, z=%d\n", iterations,
sizeX, sizeY, sizeZ);
printf(" initial alive chance=%d, neighbors for alive=%d to %d\n", initChance,
aliveLow, aliveHigh);
if(writeOut) {
printf(" File output enabled.\n");
}
runLife(iterations, sizeX, sizeY, sizeZ, initChance, aliveLow, aliveHigh, printArray, writeOut);
return EXIT_SUCCESS;
} |
0f78b294c6cb948aee68c8112d839b3b1be0ae7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
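// ALLOCATE_ARRAY binds `name` to pinned (page-locked) host memory holding `size`
// elements of `type`; the generated storage_##name object keeps the allocation
// alive for the enclosing scope. Illustrative usage sketch (names only mirror
// how the macro is used further down in this file):
//   magma_int_t* ipiv_data;
//   ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);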
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and they cover floor(batch_size / batch_limit) * batch_limit matrix solves
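    // Worked example (illustrative numbers): with batch_size = 150000 and
    // batch_limit = 65535, mini_batches == 2, so the loop below issues two
    // batched calls covering 131070 solves, and the tail call handles the
    // remaining 150000 % 65535 = 18930.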
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
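  // Batched inversion is a two-step process: LU-factorize each matrix in place
  // (self_array), then compute the inverses out of place into self_inv_array
  // with the getri calls below.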
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and they cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
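  // Dispatch heuristic: single matrices, tiny batches (batchCount(self) <= 2),
  // or builds where MAGMA is unavailable take the cuSOLVER/cuBLAS path; larger
  // batches use the MAGMA batched routines above.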
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and they cover floor(batch_size / batch_limit) * batch_limit matrix solves
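    // Unlike the factorization loops above, the batched solve wrappers report a
    // single aggregated info value, so we bail out as soon as one mini-batch fails.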
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run
    // without violating the maximum kernel configuration
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and they cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
  magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for the pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
    // Same comment as in the single-matrix case above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
  auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
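  // Pivots are pre-filled with the identity permutation 1..k, so the result is
  // still a valid pivot vector when pivot == false and magmaLuNoPiv* leaves it
  // untouched.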
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and they cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
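  // tau and work are allocated once and shared across the whole batch: every
  // matrix in the batch has the same (m, n), so the required sizes are identical.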
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
  // We need to run ?geqrf_gpu again due to the following bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
    // Fix the number of rows and columns for r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
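  // LAPACK-style workspace query: calling with lwork == -1 (and liwork / lrwork
  // left at -1) performs no factorization and only reports the optimal sizes in
  // wkopt / iwkopt / rwkopt, which are read back right below.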
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
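  // rwork is only needed for the complex dtypes (zheevd/cheevd); for real inputs
  // it stays nullptr and the real specializations of magmaSymeig ignore it.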
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of the
  // required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
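  // Map the LAPACK-style job character onto MAGMA's enum: 'A' -> compute full U
  // and VT, 'S' -> only the leading min(m, n) singular vectors (thin SVD),
  // anything else (here 'N') -> singular values only.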
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
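    // e.g. for a single m x n matrix the row-major default strides {n, 1} become
    // {1, m}, i.e. column major, which is the layout MAGMA expects.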
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and they cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 0f78b294c6cb948aee68c8112d839b3b1be0ae7e.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
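// use_magma_ is consumed further down by the dispatch helpers (e.g.
// _inverse_helper_cuda) to choose between the MAGMA and cuSOLVER/cuBLAS paths.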
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
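// The templates declared above are only prototypes; each supported dtype gets an
// explicit specialization below that forwards to the corresponding
// magma_{s,d,c,z}* routine.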
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
MagmaStreamSyncGuard guard;
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
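// Note (editorial sketch, not part of the original source): the reinterpret_casts in the
// complex specializations above assume that c10::complex<double> / c10::complex<float>
// are layout-compatible with magmaDoubleComplex / magmaFloatComplex (a contiguous
// {real, imag} pair), so device pointers can be passed straight through without copies.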
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
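// Illustrative expansion (editorial sketch, not part of the original source): a call such as
//   ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
// expands to pinned host storage whose lifetime is tied to the enclosing scope:
//   auto storage_info_array = pin_memory<magma_int_t>(batch_size);
//   info_array = static_cast<magma_int_t*>(storage_info_array.data());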
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
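// The batched path above splits work into chunks of at most batch_limit (65535) calls.
// Minimal standalone sketch of that chunking pattern (hypothetical helper name, not part
// of the original source); `process_chunk` receives the chunk offset and length:
template <typename F>
static void forEachMiniBatch(int64_t batch_size, int64_t limit, F&& process_chunk) {
  int64_t idx = 0;
  // Full chunks of exactly `limit` items
  for (; idx + limit <= batch_size; idx += limit) {
    process_chunk(idx, limit);
  }
  // Remainder, equal to batch_size % limit
  if (batch_size % limit != 0) {
    process_chunk(idx, batch_size % limit);
  }
}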
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
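// Math sketch for the batched path above (editorial, not part of the original source):
// each matrix is first LU-factorized in place by getrf (A -> P*L*U), and getri then
// forms the inverse from that factorization, conceptually A^-1 = U^-1 * L^-1 * P^T,
// writing the result into the corresponding slice of self_inv.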
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
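// Shape sketch for the helper above (editorial, not part of the original source):
// for an input of shape (B, m, n) with k = min(m, n), the returned tensors are
//   LU factors : (B, m, n)  column-major working copy holding L and U
//   pivots     : (B, k)     pre-filled with 1..k, overwritten when pivot == true
//   infos      : (B,)       one MAGMA info code per matrix in the batch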
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
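// Flow sketch of apply_qr above (editorial summary, not part of the original source):
// for every matrix in the batch,
//   1. geqrf2_gpu on the R working copy  -> correct R factor (upper triangle kept later)
//   2. geqrf_gpu  on the Q working copy  -> reflectors in the format orgqr_gpu expects
//   3. orgqr_gpu                         -> explicit Q with n_columns columns
// i.e. the factorization is computed twice per matrix to work around the MAGMA
// limitation described in the comment above.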
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns for r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run the routine once first, with lwork/liwork/lrwork = -1, to query the optimal workspace sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
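// Minimal sketch of the workspace-query idiom used above (editorial; `xsyevd` is a
// placeholder for the usual LAPACK/MAGMA-style routine, not an actual symbol here):
//   magma_int_t lwork = -1;                     // -1 means "query only"
//   double wkopt = 0;
//   xsyevd(..., &wkopt, lwork, ..., &info);     // returns the optimal size in wkopt
//   lwork = static_cast<magma_int_t>(wkopt);
//   // allocate `lwork` elements, then call again with the real workspace
//   xsyevd(..., work, lwork, ..., &info);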
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of the
// required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)").
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run the routine once first, with lwork = -1, to query the optimal workspace size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of part of these requirements (for U, S and VT);
// for the input matrix, these requirements are handled below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
ed6b2cbdbf8a74770986d8c5a8e30e263ff632bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHBlas.h>
#include <THH/THHTensorCopy.h>
#include <TH/THHalf.h>
#include <THH/THHApply.cuh>
#include <THH/THHReduce.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
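// Hypothetical host-side dispatch sketch (editorial; the real THC heuristic lives in the
// generic THCTensorIndex implementation and may differ): prefer the small-index kernel
// only when few indices are selected, otherwise use the large-index kernel so the grid
// can also parallelize over the index dimension.
//   bool useSmallIndex = numIndices <= kSmallIndexThreshold;  // threshold is assumed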
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
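// Worked example for the wrapping above (editorial): with size == 5, an index of -1
// passes the bounds assert (since -1 >= -5 and -1 < 5) and is shifted to -1 + 5 == 4,
// i.e. it addresses the last element, matching Python-style negative indexing.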
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
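// Accumulating variant of put. It relies on duplicate indices being adjacent
// (i.e. the (index, value) pairs have been sorted by index): only the first
// occurrence of an index performs the write, and it walks forward summing
// every duplicate, which avoids atomic adds.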
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
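// Builds the take/put functor over a dimension-collapsed TensorInfo and runs
// it pointwise. Dims = -2 selects the contiguous-tensor offset specialization,
// Dims = -1 the general strided fallback.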
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
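// Uses 32-bit index arithmetic whenever the tensor's offsets fit, which is
// cheaper on the GPU, and falls back to 64-bit otherwise.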
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include <THH/generic/THHTensorIndex.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorIndex.hip>
#include <THH/THHGenerateBoolType.h>
| ed6b2cbdbf8a74770986d8c5a8e30e263ff632bb.cu | #include <THC/THC.h>
#include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCBlas.h>
#include <THC/THCTensorCopy.h>
#include <TH/THHalf.h>
#include <THC/THCApply.cuh>
#include <THC/THCReduce.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)];
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info);
}
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include <THC/generic/THCTensorIndex.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorIndex.cu>
#include <THC/THCGenerateBoolType.h>
|
a2927ccca9c0f6612d6d9a108fe0cd037f92349f.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "TH.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libextracunn(lua_State *L);
int luaopen_libextracunn(lua_State *L)
{
lua_newtable(L);
extracunn_SpatialConvolutionMMNoBias_init(L);
extracunn_Huber_init(L);
extracunn_MSSECriterion_init(L);
return 1;
}
| a2927ccca9c0f6612d6d9a108fe0cd037f92349f.cu | #include "luaT.h"
#include "TH.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libextracunn(lua_State *L);
int luaopen_libextracunn(lua_State *L)
{
lua_newtable(L);
extracunn_SpatialConvolutionMMNoBias_init(L);
extracunn_Huber_init(L);
extracunn_MSSECriterion_init(L);
return 1;
}
|
617d81135c51153aa94061203b248e505ed3fdf4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2);
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum);
int main(int argc,char **argv)
{
hipEvent_t start=0;
hipEvent_t stop=0;
float time=0;
hipEventCreate(&start);
hipEventCreate(&stop);
int vect_len=1e8;
	float *h_vect1 = (float *)malloc(vect_len * sizeof(float));
	float *h_vect2 = (float *)malloc(vect_len * sizeof(float));
	float *h_sum = (float *)malloc(vect_len * sizeof(float));
float *d_vect1, *d_vect2, *d_sum;
// initialization
init_vects(vect_len, h_vect1, h_vect2);
// tranfer vectors to global memory
hipMalloc((void **)&d_vect1 , vect_len*sizeof(float) );
hipMalloc((void **)&d_vect2 , vect_len*sizeof(float) );
hipMalloc((void **)&d_sum , vect_len*sizeof(float) );
hipMemcpy (d_vect1 , h_vect1 , vect_len*sizeof(float) , hipMemcpyHostToDevice);
hipMemcpy (d_vect2 , h_vect2 , vect_len*sizeof(float) , hipMemcpyHostToDevice);
// determine block and grid size.
dim3 DimGrid(48829,2 ,1);
dim3 DimBlock(1024,1,1);
	hipEventRecord(start,0);
	hipLaunchKernelGGL((vec_add), dim3(DimGrid), dim3(DimBlock), 0, 0, vect_len, d_vect1, d_vect2, d_sum);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_sum , d_sum , vect_len*sizeof(float) , hipMemcpyDeviceToHost);
//Free the Device array
hipFree (d_vect1);
hipFree (d_vect2);
hipFree (d_sum);
hipEventElapsedTime(&time,start,stop);
printf("time of the Kernel %f \n",time );
printf("v1=%f ,, v2 =%f ,, sum=%f \n",h_vect1[0],h_vect2[0],h_sum[0]);
	free(h_vect1); free(h_vect2); free(h_sum);
	return 0;
}
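// Element-wise sum: each thread handles one element of the flattened index
// space; the bounds check discards the surplus threads of the rounded-up grid.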
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum){
	// Flatten the 2D grid of 1D blocks into a single linear element index
	int tid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if(tid<vect_len)
d_sum[tid]= d_vect1[tid] + d_vect2[tid];
}
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2){
srand(time(NULL));
for (int i=0; i<vect_len; i++){
h_vect1[i] = rand();
h_vect2[i] = rand();
}
}
| 617d81135c51153aa94061203b248e505ed3fdf4.cu | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2);
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum);
int main(int argc,char **argv)
{
cudaEvent_t start=0;
cudaEvent_t stop=0;
float time=0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int vect_len=1e8;
	float *h_vect1 = (float *)malloc(vect_len * sizeof(float));
	float *h_vect2 = (float *)malloc(vect_len * sizeof(float));
	float *h_sum = (float *)malloc(vect_len * sizeof(float));
float *d_vect1, *d_vect2, *d_sum;
// initialization
init_vects(vect_len, h_vect1, h_vect2);
// tranfer vectors to global memory
cudaMalloc((void **)&d_vect1 , vect_len*sizeof(float) );
cudaMalloc((void **)&d_vect2 , vect_len*sizeof(float) );
cudaMalloc((void **)&d_sum , vect_len*sizeof(float) );
cudaMemcpy (d_vect1 , h_vect1 , vect_len*sizeof(float) , cudaMemcpyHostToDevice);
cudaMemcpy (d_vect2 , h_vect2 , vect_len*sizeof(float) , cudaMemcpyHostToDevice);
// determine block and grid size.
dim3 DimGrid(48829,2 ,1);
dim3 DimBlock(1024,1,1);
cudaEventRecord(start,0);
vec_add<<<DimGrid,DimBlock>>>(vect_len, d_vect1 ,d_vect2 ,d_sum);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_sum , d_sum , vect_len*sizeof(float) , cudaMemcpyDeviceToHost);
//Free the Device array
cudaFree (d_vect1);
cudaFree (d_vect2);
cudaFree (d_sum);
cudaEventElapsedTime(&time,start,stop);
printf("time of the Kernel %f \n",time );
printf("v1=%f ,, v2 =%f ,, sum=%f \n",h_vect1[0],h_vect2[0],h_sum[0]);
	free(h_vect1); free(h_vect2); free(h_sum);
	return 0;
}
__global__ void vec_add(int vect_len, float *d_vect1, float *d_vect2, float *d_sum){
	// Flatten the 2D grid of 1D blocks into a single linear element index
	int tid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if(tid<vect_len)
d_sum[tid]= d_vect1[tid] + d_vect2[tid];
}
__host__ void init_vects(int vect_len,float *h_vect1,float *h_vect2){
srand(time(NULL));
for (int i=0; i<vect_len; i++){
h_vect1[i] = rand();
h_vect2[i] = rand();
}
}
|
288ab3cc6defef94e7a2a8dee20cac5edd3cd1ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include <time.h>
#include <cstdint>
#include <iostream>
#include <fstream>
#include "stb_image_write.h"
using namespace std;
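// One thread per gray level (256 threads): normalizes the prefix-summed
// histogram (CDF) in hist_counts_d into a 0-255 equalization LUT.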
__global__ void gpu_gray_lut(unsigned int* hist_counts_d, uint8_t* LUT_d, int size)
{
int myid = threadIdx.x;
LUT_d[myid] = static_cast<uint8_t>(round(255.0 * hist_counts_d[myid] / (size)));
}
inline bool exists(const std::string &name) {
ifstream f(name.c_str());
return f.good();
}
void cpu_histogram_equalization_grayscale(const string &path, const string &filename) {
if (!exists(path + filename)) {
cout << "File Doesn't Exist";
return;
}
int width, height, channels;
int desired_channels = 1;
unsigned int *hist_counts = new unsigned int[256];
uint8_t *LUT = new uint8_t[256];
unsigned int *hist_counts_d;
uint8_t *LUT_d;
    memset(hist_counts, 0, 256 * sizeof(unsigned int));
    memset(LUT, 0, 256 * sizeof(uint8_t));
uint8_t *gray_image = stbi_load((path + filename).c_str(), &width, &height, &channels, desired_channels);
//!histogram
for (int i = 0; i < width * height * desired_channels; i++)
hist_counts[gray_image[i]]++;
/* for (int i = 0; i < 256; i++)
cout << hist_counts[i] << ",";
cout << endl;*/
//!CDF
for (int i = 1; i < 256; i++)
hist_counts[i] += hist_counts[i - 1];
cout << "Final value of CDF: " << hist_counts[255] << endl;
//cuda code
hipMalloc((void **) &hist_counts_d, 256 * sizeof(int));
hipMemcpy(hist_counts_d, hist_counts, 256 * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **) &LUT_d, 256* sizeof(int));
dim3 DimGrid(1, 1);
dim3 DimBlock(256, 1);
hipLaunchKernelGGL(( gpu_gray_lut), dim3(DimGrid),dim3(DimBlock) , 0, 0, hist_counts_d, LUT_d, width * height);
    hipMemcpy(LUT, LUT_d, 256 * sizeof(uint8_t), hipMemcpyDeviceToHost);
hipFree(hist_counts_d);
hipFree(LUT_d);
//!LUT cpu
//for (int i = 0; i < 256; i++)
// LUT[i] = static_cast<uint8_t>(round(255.0 * hist_counts[i] / (width * height)));
cout << "image read : " << width << " " << height << " " << channels<<endl;
//!from LUT
for (int i = 0; i < width * height * desired_channels; i++)
gray_image[i] = LUT[gray_image[i]];
stbi_write_jpg((path + "gpu_equ_" + filename).c_str(), width, height, 1, gray_image, 1000);
stbi_image_free(gray_image);
}
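// RGB variant, computed entirely on the CPU: a separate histogram, CDF and
// LUT is built for each interleaved channel and applied independently.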
void cpu_histogram_equalization_rgb(const string &path, const string &filename) {
if (!exists(path + filename)) {
cout << "File Doesn't Exist";
return;
}
int width, height, channels;
int desired_channels = 3;
int hist_counts[3][256];
uint8_t LUT[3][256];
memset(hist_counts, 0, sizeof hist_counts);
memset(LUT, 0, sizeof LUT);
uint8_t *rgb_image = stbi_load((path + filename).c_str(), &width, &height, &channels, desired_channels);
//!histogram
for (int i = 0; i < width * height * desired_channels; i++)
hist_counts[i % 3][rgb_image[i]]++;
//!CDF
for (int i = 1; i < 256; i++)
for (int channel = 0; channel < desired_channels; ++channel)
hist_counts[channel][i] += hist_counts[channel][i - 1];
//!LUT cpu
for (int i = 0; i < 256; i++)
for (int channel = 0; channel < desired_channels; ++channel)
LUT[channel][i] = static_cast<uint8_t>(round(255.0 * hist_counts[channel][i] / (width * height)));
cout << "image read : " << width << " " << height << " " << channels<<endl;
//!from LUT
for (int i = 0; i < width * height * desired_channels; i += desired_channels)
for (int channel = 0; channel < desired_channels; ++channel)
rgb_image[i + channel] = LUT[channel][rgb_image[i + channel]];
stbi_write_jpg((path + "gpu_equ_" + filename).c_str(), width, height, desired_channels, rgb_image, 1000);
stbi_image_free(rgb_image);
}
int main(int argc, char **argv) {
cpu_histogram_equalization_grayscale("./images/", "in-grayscale.jpg");
cpu_histogram_equalization_rgb("./images/", "in-color.jpg");
return 0;
}
| 288ab3cc6defef94e7a2a8dee20cac5edd3cd1ac.cu | //http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include <time.h>
#include <cstdint>
#include <iostream>
#include <fstream>
#include "stb_image_write.h"
using namespace std;
__global__ void gpu_gray_lut(unsigned int* hist_counts_d, uint8_t* LUT_d, int size)
{
int myid = threadIdx.x;
LUT_d[myid] = static_cast<uint8_t>(round(255.0 * hist_counts_d[myid] / (size)));
}
inline bool exists(const std::string &name) {
ifstream f(name.c_str());
return f.good();
}
void cpu_histogram_equalization_grayscale(const string &path, const string &filename) {
if (!exists(path + filename)) {
cout << "File Doesn't Exist";
return;
}
int width, height, channels;
int desired_channels = 1;
unsigned int *hist_counts = new unsigned int[256];
uint8_t *LUT = new uint8_t[256];
unsigned int *hist_counts_d;
uint8_t *LUT_d;
    memset(hist_counts, 0, 256 * sizeof(unsigned int));
    memset(LUT, 0, 256 * sizeof(uint8_t));
uint8_t *gray_image = stbi_load((path + filename).c_str(), &width, &height, &channels, desired_channels);
//!histogram
for (int i = 0; i < width * height * desired_channels; i++)
hist_counts[gray_image[i]]++;
/* for (int i = 0; i < 256; i++)
cout << hist_counts[i] << ",";
cout << endl;*/
//!CDF
for (int i = 1; i < 256; i++)
hist_counts[i] += hist_counts[i - 1];
cout << "Final value of CDF: " << hist_counts[255] << endl;
//cuda code
cudaMalloc((void **) &hist_counts_d, 256 * sizeof(int));
cudaMemcpy(hist_counts_d, hist_counts, 256 * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **) &LUT_d, 256* sizeof(int));
dim3 DimGrid(1, 1);
dim3 DimBlock(256, 1);
gpu_gray_lut<<< DimGrid,DimBlock >>>(hist_counts_d, LUT_d, width * height);
    cudaMemcpy(LUT, LUT_d, 256 * sizeof(uint8_t), cudaMemcpyDeviceToHost);
cudaFree(hist_counts_d);
cudaFree(LUT_d);
//!LUT cpu
//for (int i = 0; i < 256; i++)
// LUT[i] = static_cast<uint8_t>(round(255.0 * hist_counts[i] / (width * height)));
cout << "image read : " << width << " " << height << " " << channels<<endl;
//!from LUT
for (int i = 0; i < width * height * desired_channels; i++)
gray_image[i] = LUT[gray_image[i]];
stbi_write_jpg((path + "gpu_equ_" + filename).c_str(), width, height, 1, gray_image, 1000);
stbi_image_free(gray_image);
}
void cpu_histogram_equalization_rgb(const string &path, const string &filename) {
if (!exists(path + filename)) {
cout << "File Doesn't Exist";
return;
}
int width, height, channels;
int desired_channels = 3;
int hist_counts[3][256];
uint8_t LUT[3][256];
memset(hist_counts, 0, sizeof hist_counts);
memset(LUT, 0, sizeof LUT);
uint8_t *rgb_image = stbi_load((path + filename).c_str(), &width, &height, &channels, desired_channels);
//!histogram
for (int i = 0; i < width * height * desired_channels; i++)
hist_counts[i % 3][rgb_image[i]]++;
//!CDF
for (int i = 1; i < 256; i++)
for (int channel = 0; channel < desired_channels; ++channel)
hist_counts[channel][i] += hist_counts[channel][i - 1];
//!LUT cpu
for (int i = 0; i < 256; i++)
for (int channel = 0; channel < desired_channels; ++channel)
LUT[channel][i] = static_cast<uint8_t>(round(255.0 * hist_counts[channel][i] / (width * height)));
cout << "image read : " << width << " " << height << " " << channels<<endl;
//!from LUT
for (int i = 0; i < width * height * desired_channels; i += desired_channels)
for (int channel = 0; channel < desired_channels; ++channel)
rgb_image[i + channel] = LUT[channel][rgb_image[i + channel]];
stbi_write_jpg((path + "gpu_equ_" + filename).c_str(), width, height, desired_channels, rgb_image, 1000);
stbi_image_free(rgb_image);
}
int main(int argc, char **argv) {
cpu_histogram_equalization_grayscale("./images/", "in-grayscale.jpg");
cpu_histogram_equalization_rgb("./images/", "in-color.jpg");
return 0;
}
|
17577dec705a3dbf4b3a5b81405468d63db4a5d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
void loop(int N)
{
for (int i = 0; i < N; ++i)
{
printf("This is iteration number %d\n", i);
}
}
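/*
 * A possible refactor (sketch only): each thread performs one "iteration",
 * so the loop disappears from the kernel body.
 *
 *   __global__ void loop_kernel()
 *   {
 *     printf("This is iteration number %d\n", threadIdx.x);
 *   }
 *
 * launched from main as loop_kernel<<<1, N>>>(); and followed by
 * hipDeviceSynchronize(); so the output is flushed before the program exits.
 */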
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, only use 1 block of threads.
*/
int N = 10;
loop(N);
}
| 17577dec705a3dbf4b3a5b81405468d63db4a5d3.cu | #include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
void loop(int N)
{
for (int i = 0; i < N; ++i)
{
printf("This is iteration number %d\n", i);
}
}
int main()
{
/*
* When refactoring `loop` to launch as a kernel, be sure
* to use the execution configuration to control how many
* "iterations" to perform.
*
* For this exercise, only use 1 block of threads.
*/
int N = 10;
loop(N);
}
|
0f7bd5370717e27b6721084f2cc806adda62fa54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int a, int b, int *c)
{
*c = a + b;
}
int main(void)
{
int c;
int *dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
kernel << <1, 1 >> > (2, 7, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("2+7=%d\n",c);
hipFree(dev_c);
int res;
int *dev_res;
hipMalloc((void**)&dev_res, sizeof(int));
kernel << <1, 1 >> > (8, 2, dev_res);
hipMemcpy(&res, dev_res, sizeof(int), hipMemcpyDeviceToHost);
	cout << "8+2=" << res << endl;
	hipFree(dev_res);
	return 0;
}
| 0f7bd5370717e27b6721084f2cc806adda62fa54.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void kernel(int a, int b, int *c)
{
*c = a + b;
}
int main(void)
{
int c;
int *dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
kernel << <1, 1 >> > (2, 7, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("2+7=%d\n",c);
cudaFree(dev_c);
int res;
int *dev_res;
cudaMalloc((void**)&dev_res, sizeof(int));
kernel << <1, 1 >> > (8, 2, dev_res);
cudaMemcpy(&res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);
	cout << "8+2=" << res << endl;
	cudaFree(dev_res);
	return 0;
}
|
82c691fe3fa16f446e17a15762f7ea23bd736fa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
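// Per-pixel combination of two images: imageComb selects the operation
// (0 = add, 1 = subtract, 2 = multiply, 3 = divide with a zero guard), then
// each channel is scaled by scaleFactor and shifted by offSet. The arithmetic
// is done on 8-bit channels, so results wrap rather than saturate.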
__global__ void imageCombination(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, const cv::cuda::PtrStep<uchar3> src2, int rows, int cols, int imageComb, float offSet, float scaleFactor)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < cols && dst_y < rows)
{
if (imageComb == 0) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x + src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y + src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z + src2(dst_y, dst_x).z;
}
else if (imageComb == 1) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x - src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y - src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z - src2(dst_y, dst_x).z;
}
else if (imageComb == 2) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x * src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y * src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z * src2(dst_y, dst_x).z;
}
else if (imageComb == 3) {
dst(dst_y, dst_x).x = src2(dst_y, dst_x).x == 0 ? src(dst_y, dst_x).x : src(dst_y, dst_x).x / src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src2(dst_y, dst_x).y == 0 ? src(dst_y, dst_x).y : src(dst_y, dst_x).y / src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src2(dst_y, dst_x).z == 0 ? src(dst_y, dst_x).z : src(dst_y, dst_x).z / src2(dst_y, dst_x).z;
}
dst(dst_y, dst_x).x *= scaleFactor;
dst(dst_y, dst_x).y *= scaleFactor;
dst(dst_y, dst_x).z *= scaleFactor;
dst(dst_y, dst_x).x += offSet;
dst(dst_y, dst_x).y += offSet;
dst(dst_y, dst_x).z += offSet;
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
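// Host-side launcher: dimX x dimY is the block shape and divUp rounds the
// grid up so the whole image is covered; the kernel's bounds check handles
// the partial blocks at the right/bottom edges.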
void imageCombCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& src2, int dimX, int dimY, int imageComb, float offSet, float scaleFactor)
{
const dim3 block(dimX, dimY);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
imageCombination << <grid, block >> > (src, dst, src2, dst.rows, dst.cols, imageComb, offSet, scaleFactor);
} | 82c691fe3fa16f446e17a15762f7ea23bd736fa3.cu | #include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
__global__ void imageCombination(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, const cv::cuda::PtrStep<uchar3> src2, int rows, int cols, int imageComb, float offSet, float scaleFactor)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < cols && dst_y < rows)
{
if (imageComb == 0) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x + src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y + src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z + src2(dst_y, dst_x).z;
}
else if (imageComb == 1) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x - src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y - src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z - src2(dst_y, dst_x).z;
}
else if (imageComb == 2) {
dst(dst_y, dst_x).x = src(dst_y, dst_x).x * src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src(dst_y, dst_x).y * src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src(dst_y, dst_x).z * src2(dst_y, dst_x).z;
}
else if (imageComb == 3) {
dst(dst_y, dst_x).x = src2(dst_y, dst_x).x == 0 ? src(dst_y, dst_x).x : src(dst_y, dst_x).x / src2(dst_y, dst_x).x;
dst(dst_y, dst_x).y = src2(dst_y, dst_x).y == 0 ? src(dst_y, dst_x).y : src(dst_y, dst_x).y / src2(dst_y, dst_x).y;
dst(dst_y, dst_x).z = src2(dst_y, dst_x).z == 0 ? src(dst_y, dst_x).z : src(dst_y, dst_x).z / src2(dst_y, dst_x).z;
}
dst(dst_y, dst_x).x *= scaleFactor;
dst(dst_y, dst_x).y *= scaleFactor;
dst(dst_y, dst_x).z *= scaleFactor;
dst(dst_y, dst_x).x += offSet;
dst(dst_y, dst_x).y += offSet;
dst(dst_y, dst_x).z += offSet;
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void imageCombCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& src2, int dimX, int dimY, int imageComb, float offSet, float scaleFactor)
{
const dim3 block(dimX, dimY);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
imageCombination << <grid, block >> > (src, dst, src2, dst.rows, dst.cols, imageComb, offSet, scaleFactor);
} |
2e142764a0636dfd81b50e43c17588ef9beb4922.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <c10/util/accumulate.h>
#include <ATen/hip/cub.cuh>
#include <ATen/native/hip/ScanKernels.h>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!at::_isnan(rhs) && (at::_isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(at::_isnan(*self) || (!at::_isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(
const TensorBase& self, const TensorBase& values, const TensorBase& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim_with_indices<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(
const TensorBase& self, const TensorBase& values, const TensorBase& indices,
scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
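// Dispatches the indexed scan: the input is made contiguous, then the
// innermost-dim kernel is used when scanning the last dimension and the
// outer-dim kernel otherwise.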
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const TensorBase& self, const TensorBase& values, const TensorBase& indices,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
auto self_ = self.expect_contiguous();
TORCH_INTERNAL_ASSERT(values.is_contiguous() && indices.is_contiguous());
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(*self_, values, indices, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(*self_, values, indices, dim, init, binary_op);
}
}
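// cummax/cummin scan from the identity of their comparison: -infinity (or the
// lowest representable value) for cummax and +infinity (or the largest) for
// cummin, so every real input replaces the initial value.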
void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// As we cannot directly initialize shared array for complex types
// Reference:
// `error: initializer not allowed for __shared__ variable`
// We instead get the base scalar type and allocate twice number of
// elements required of base type and reinterpret them as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const TensorBase& self, const TensorBase& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const TensorBase& self, const TensorBase& result,
scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const TensorBase& self, const TensorBase& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
auto self_ = self.expect_contiguous();
TORCH_INTERNAL_ASSERT(result.is_contiguous());
if (self.numel() == self.size(dim)) {
cuda::cub::inclusive_scan(self_->data_ptr<scalar_t>(), result.data_ptr<scalar_t>(), binary_op, self.numel());
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(*self_, result, init, binary_op);
} else {
scan_outer_dim<scalar_t>(*self_, result, dim, init, binary_op);
}
}
void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
const auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
}
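#if 0
// Illustrative host-side sketch (not part of the original file): the running
// log-add-exp reduction that the scan applies along each row, using the same
// numerically stable identity as the lambda above:
//   log(exp(a) + exp(b)) = log1p(exp(min - max)) + max.
// NaN propagation from the original lambda is omitted here for brevity.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>
static std::vector<double> logcumsumexp_ref(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  double acc = -INFINITY;  // identity element for log-add-exp
  for (size_t i = 0; i < x.size(); ++i) {
    const double mn = std::min(acc, x[i]), mx = std::max(acc, x[i]);
    // The isfinite() branch mirrors the special case for infinite inputs above.
    acc = (mn != mx || std::isfinite(mn)) ? std::log1p(std::exp(mn - mx)) + mx : x[i];
    out[i] = acc;
  }
  return out;
}
#endif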
void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_cuda",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::plus<scalar_t>());
});
}
void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::multiplies<scalar_t>());
});
}
}} // namespace at::native
| 2e142764a0636dfd81b50e43c17588ef9beb4922.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <c10/util/accumulate.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/native/cuda/ScanKernels.h>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!at::_isnan(rhs) && (at::_isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(at::_isnan(*self) || (!at::_isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
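#if 0
// Illustrative host-side sketch (not part of the original file): what one row
// of the cummax scan-with-indices computes, matching the update rule above
// with binary_op = std::greater_equal and init = -inf. A NaN dominates from
// the first column where it appears, and ties move the index to the later
// column because of the >= comparison. `stride` plays the role of num_irows.
#include <cmath>
#include <cstdint>
static void cummax_row_ref(const float* self, float* values, int64_t* indices,
                           int row_size, int stride) {
  float out = -INFINITY;
  int64_t out_idx = 0;
  for (int col = 0; col < row_size; ++col) {
    const float v = self[col * stride];
    if (std::isnan(v) || (!std::isnan(out) && v >= out)) {
      out = v;
      out_idx = col;
    }
    values[col * stride] = out;
    indices[col * stride] = out_idx;
  }
}
#endif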
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(
const TensorBase& self, const TensorBase& values, const TensorBase& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(
const TensorBase& self, const TensorBase& values, const TensorBase& indices,
scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const TensorBase& self, const TensorBase& values, const TensorBase& indices, //int64_t dim) {
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
auto self_ = self.expect_contiguous();
TORCH_INTERNAL_ASSERT(values.is_contiguous() && indices.is_contiguous());
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(*self_, values, indices, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(*self_, values, indices, dim, init, binary_op);
}
}
void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
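#if 0
// Illustrative host-side sketch (not part of the original file): the indexing
// used by tensor_kernel_scan_outer_dim for a cumulative sum. Elements of one
// scanned row are num_irows apart in memory, and there are
// num_orows * num_irows independent rows of length row_size.
#include <vector>
static void scan_outer_dim_ref(const std::vector<float>& src, std::vector<float>& tgt,
                               int num_orows, int num_irows, int row_size) {
  for (int orow = 0; orow < num_orows; ++orow)
    for (int irow = 0; irow < num_irows; ++irow) {
      float acc = 0.f;  // init for a cumulative sum
      for (int col = 0; col < row_size; ++col) {
        const int idx = orow * row_size * num_irows + col * num_irows + irow;
        acc += src[idx];
        tgt[idx] = acc;
      }
    }
}
#endif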
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
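#if 0
// Illustrative host-side sketch (not part of the original file): the same
// up-sweep/down-sweep index pattern as above, run sequentially on one block of
// 2 * num_threads_x elements. With T = 4 and buf = {1,1,1,1,1,1,1,1} it
// produces the inclusive prefix sums {1,2,3,4,5,6,7,8}.
static void emulate_block_scan(double* buf, int T /* num_threads_x */) {
  // Up-sweep: build partial sums at doubling strides.
  for (int s = T, d = 1; s >= 1; s >>= 1, d <<= 1)
    for (int tid = 0; tid < s; ++tid) {
      const int off = (2 * tid + 1) * d - 1;
      buf[off + d] = buf[off] + buf[off + d];
    }
  // Down-sweep: propagate the partial sums to the remaining positions.
  for (int s = 2, d = T / 2; d >= 1; s <<= 1, d >>= 1)
    for (int tid = 0; tid < s - 1; ++tid) {
      const int off = 2 * (tid + 1) * d - 1;
      buf[off + d] = buf[off] + buf[off + d];
    }
}
#endif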
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// As we cannot directly initialize shared array for complex types
// Reference:
// `error: initializer not allowed for __shared__ variable`
// We instead get the base scalar type and allocate twice number of
// elements required of base type and reinterpret them as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const TensorBase& self, const TensorBase& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const TensorBase& self, const TensorBase& result,
scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_innermost_dim<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const TensorBase& self, const TensorBase& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
auto self_ = self.expect_contiguous();
TORCH_INTERNAL_ASSERT(result.is_contiguous());
if (self.numel() == self.size(dim)) {
cuda::cub::inclusive_scan(self_->data_ptr<scalar_t>(), result.data_ptr<scalar_t>(), binary_op, self.numel());
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(*self_, result, init, binary_op);
} else {
scan_outer_dim<scalar_t>(*self_, result, dim, init, binary_op);
}
}
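// [Editorial worked example, not from the original source] Dispatch in scan_dim
// for a contiguous tensor of shape (2, 3, 4): scanning dim 2 (ndim - 1) takes
// scan_innermost_dim with 6 rows of length 4; scanning dim 0 or dim 1 takes
// scan_outer_dim; a tensor whose other dimensions all have size 1
// (numel() == size(dim)) is handled by the single cub::inclusive_scan call.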
void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
const auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
}
void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_cuda",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::plus<scalar_t>());
});
}
void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::multiplies<scalar_t>());
});
}
}} // namespace at::native
|
a78e596a95f89781cce187e6a4ed1509ad7bc5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
} | a78e596a95f89781cce187e6a4ed1509ad7bc5d5.cu | #include "includes.h"
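// [Editorial worked example, not from the original source] For the
// backward_maxpool_layer_kernel below: with size = 3, stride = 2 we get
// area = (3-1)/2 = 1, so each input pixel checks the 3x3 neighbourhood of
// output cells whose pooling windows could contain it, and accumulates
// delta[out_index] only where indexes[out_index] recorded this exact input
// position as the argmax. With size = 2, stride = 2, area = 0 and each input
// maps to a single candidate output cell.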
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
} |
5ff81ad00828ca2b14ab8f91005bea5c089f3b30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_inc_potential.hpp"
#include "cuda_device_properties.hpp"
#include <gauxc/util/div_ceil.hpp>
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace integrator {
namespace cuda {
using namespace GauXC::cuda;
#define WARP_X 16
#define WARP_Y 1
#define UNROLL_FACTOR 4
#define EFF_UNROLL 4
#define CUT_X 8
#define CUT_Y 8
template <typename T>
__global__ __launch_bounds__(1024, 1)
void inc_by_submat_combined_kernel( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* A,
size_t LDA,
const int block_y,
const int block_x ) {
const int batch_id = blockIdx.z;
auto& task = device_tasks[ batch_id ];
const auto* submat_cut_device = task.submat_cut;
const auto* submat_block_device = task.submat_block;
const auto LDAS = task.nbe;
auto* ASmall_device = task.nbe_scr;
//if( LDAS == LDAB ) return;
const int tid_xx = threadIdx.x % WARP_X;
const int tid_xy = threadIdx.x / WARP_X;
const int tid_yx = threadIdx.y % CUT_X;
const int tid_yy = threadIdx.y / CUT_X;
const int start_cut_y = submat_block_device[block_y];
const int end_cut_y = submat_block_device[block_y+1];
const int start_cut_x = submat_block_device[block_x];
const int end_cut_x = submat_block_device[block_x+1];
for( int i_cut = tid_yy + start_cut_y; i_cut < end_cut_y; i_cut += CUT_Y ) {
const int3 i_data = *((int3*)(submat_cut_device + 3*i_cut));
const int i_cut_first = i_data.x;
const int delta_i = i_data.y;
const int i_cut_small = i_data.z;
for( int j_cut = tid_yx + start_cut_x; j_cut < end_cut_x; j_cut += CUT_X ) {
const int3 j_data = *((int3*)(submat_cut_device + 3*j_cut));
const int j_cut_first = j_data.x;
const int delta_j = j_data.y;
const int j_cut_small = j_data.z;
auto* ASmall_begin = ASmall_device + i_cut_small + j_cut_small*LDAS;
auto* ABig_begin = A + i_cut_first + j_cut_first*LDA;
int J;
for( J = tid_xy; J < (delta_j / EFF_UNROLL) * EFF_UNROLL; J += EFF_UNROLL ) {
for( int I = tid_xx; I < delta_i; I += WARP_X ) {
double val[UNROLL_FACTOR];
double* address[UNROLL_FACTOR];
#pragma unroll
for (int k = 0; k < UNROLL_FACTOR; k++) {
val[k] = ASmall_begin[I + (J+k*WARP_Y)*LDAS];
address[k] = ABig_begin + I + (J+k*WARP_Y)*LDA;
}
#pragma unroll
for (int k = 0; k < UNROLL_FACTOR; k++) {
atomicAdd(address[k], val[k] );
}
}
}
for ( ; J < delta_j; J += WARP_Y) {
for( int I = tid_xx; I < delta_i; I += WARP_X ) {
atomicAdd(ABig_begin + I + J*LDA, ASmall_begin[I + J*LDAS] );
}
}
}
}
}
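#if 0
// Illustrative host-side sketch (not part of the original file): the serial
// equivalent of the scatter-add above. Each cut triple (first, delta, small)
// maps a contiguous block of the packed column-major matrix ASmall (leading
// dimension LDAS) onto the big matrix A (leading dimension LDA); the device
// kernel performs the same accumulation with atomicAdd and block/warp tiling.
#include <cstddef>
static void inc_by_submat_ref(double* A, const double* ASmall, const int* cuts,
                              int ncut, size_t LDA, size_t LDAS) {
  for (int ic = 0; ic < ncut; ++ic)
    for (int jc = 0; jc < ncut; ++jc) {
      const int i_first = cuts[3 * ic], di = cuts[3 * ic + 1], i_small = cuts[3 * ic + 2];
      const int j_first = cuts[3 * jc], dj = cuts[3 * jc + 1], j_small = cuts[3 * jc + 2];
      for (int J = 0; J < dj; ++J)
        for (int I = 0; I < di; ++I)
          A[(j_first + J) * LDA + (i_first + I)] +=
              ASmall[(j_small + J) * LDAS + (i_small + I)];
    }
}
#endif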
template <typename T>
void task_inc_potential( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* V_device,
size_t LDV,
hipStream_t stream ) {
dim3 threads(warp_size / 2, max_warps_per_thread_block * 2, 1), blocks(1,1,ntasks);
const int submat_block_size = get_submat_cut_block(LDV, 0);
for (int i = 0; i < util::div_ceil(LDV, submat_block_size); i++) {
for (int j = 0; j < util::div_ceil(LDV, submat_block_size); j++) {
hipLaunchKernelGGL(( inc_by_submat_combined_kernel), dim3(blocks), dim3(threads), 0, stream ,
ntasks, device_tasks, V_device, LDV, i, j
);
}
}
}
template
void task_inc_potential( size_t ntasks,
XCTaskDevice<double>* device_tasks,
double* V_device,
size_t LDV,
hipStream_t stream );
template <typename T>
__global__ void symmetrize_matrix_device( size_t nbf, size_t LDA, T* A ) {
const size_t block_size = warp_size;
__shared__ T buffer[block_size][block_size+1]; // Pad shared memory to resolve shared memory bank conflicts
const size_t num_blocks = ((nbf + block_size - 1) / block_size);
for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) {
// TODO This could be load balanced if need be
const int i_coord = i * block_size;
for (int j = i; j < num_blocks; j++) {
const int j_coord = j * block_size;
// Read in block to buffer
// TODO These could be vector reads/writes if this becomes significant
if (i_coord + threadIdx.y < nbf && j_coord + threadIdx.x < nbf) {
buffer[threadIdx.y][threadIdx.x] = A[(i_coord + threadIdx.y) * LDA + j_coord + threadIdx.x];
}
__syncthreads();
// Write buffer
if (j_coord + threadIdx.y < nbf && i_coord + threadIdx.x < nbf) {
if ((j_coord != i_coord || threadIdx.x < threadIdx.y)) { // handles the diagonal block
A[(j_coord + threadIdx.y) * LDA + i_coord + threadIdx.x] = buffer[threadIdx.x][threadIdx.y];
}
}
__syncthreads();
}
}
}
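#if 0
// Illustrative host-side sketch (not part of the original file): the net
// effect of symmetrize_matrix_device, ignoring the shared-memory tiling. The
// strictly upper triangle (leading dimension LDA) is mirrored into the
// strictly lower triangle, turning an upper-triangular result into a full
// symmetric matrix.
#include <cstddef>
static void symmetrize_ref(double* A, std::size_t nbf, std::size_t LDA) {
  for (std::size_t i = 0; i < nbf; ++i)
    for (std::size_t j = i + 1; j < nbf; ++j)
      A[j * LDA + i] = A[i * LDA + j];
}
#endif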
template <typename T>
void symmetrize_matrix( size_t nbf, size_t LDV, T* V_device, hipStream_t stream) {
const size_t num_blocks = ((LDV + warp_size - 1) / warp_size);
// warp_size must equal max_warps_per_thread_block, and both must equal 32
dim3 threads(warp_size, max_warps_per_thread_block), blocks(num_blocks);
hipLaunchKernelGGL(( symmetrize_matrix_device), dim3(blocks), dim3(threads), 0, stream, nbf, LDV, V_device);
}
template
void symmetrize_matrix( size_t nbf, size_t LDV, double* V_device, hipStream_t stream );
}
}
}
| 5ff81ad00828ca2b14ab8f91005bea5c089f3b30.cu | #include "cuda_inc_potential.hpp"
#include "cuda_device_properties.hpp"
#include <gauxc/util/div_ceil.hpp>
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace integrator {
namespace cuda {
using namespace GauXC::cuda;
#define WARP_X 16
#define WARP_Y 1
#define UNROLL_FACTOR 4
#define EFF_UNROLL 4
#define CUT_X 8
#define CUT_Y 8
template <typename T>
__global__ __launch_bounds__(1024, 1)
void inc_by_submat_combined_kernel( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* A,
size_t LDA,
const int block_y,
const int block_x ) {
const int batch_id = blockIdx.z;
auto& task = device_tasks[ batch_id ];
const auto* submat_cut_device = task.submat_cut;
const auto* submat_block_device = task.submat_block;
const auto LDAS = task.nbe;
auto* ASmall_device = task.nbe_scr;
//if( LDAS == LDAB ) return;
const int tid_xx = threadIdx.x % WARP_X;
const int tid_xy = threadIdx.x / WARP_X;
const int tid_yx = threadIdx.y % CUT_X;
const int tid_yy = threadIdx.y / CUT_X;
const int start_cut_y = submat_block_device[block_y];
const int end_cut_y = submat_block_device[block_y+1];
const int start_cut_x = submat_block_device[block_x];
const int end_cut_x = submat_block_device[block_x+1];
for( int i_cut = tid_yy + start_cut_y; i_cut < end_cut_y; i_cut += CUT_Y ) {
const int3 i_data = *((int3*)(submat_cut_device + 3*i_cut));
const int i_cut_first = i_data.x;
const int delta_i = i_data.y;
const int i_cut_small = i_data.z;
for( int j_cut = tid_yx + start_cut_x; j_cut < end_cut_x; j_cut += CUT_X ) {
const int3 j_data = *((int3*)(submat_cut_device + 3*j_cut));
const int j_cut_first = j_data.x;
const int delta_j = j_data.y;
const int j_cut_small = j_data.z;
auto* ASmall_begin = ASmall_device + i_cut_small + j_cut_small*LDAS;
auto* ABig_begin = A + i_cut_first + j_cut_first*LDA;
int J;
for( J = tid_xy; J < (delta_j / EFF_UNROLL) * EFF_UNROLL; J += EFF_UNROLL ) {
for( int I = tid_xx; I < delta_i; I += WARP_X ) {
double val[UNROLL_FACTOR];
double* address[UNROLL_FACTOR];
#pragma unroll
for (int k = 0; k < UNROLL_FACTOR; k++) {
val[k] = ASmall_begin[I + (J+k*WARP_Y)*LDAS];
address[k] = ABig_begin + I + (J+k*WARP_Y)*LDA;
}
#pragma unroll
for (int k = 0; k < UNROLL_FACTOR; k++) {
atomicAdd(address[k], val[k] );
}
}
}
for ( ; J < delta_j; J += WARP_Y) {
for( int I = tid_xx; I < delta_i; I += WARP_X ) {
atomicAdd(ABig_begin + I + J*LDA, ASmall_begin[I + J*LDAS] );
}
}
}
}
}
template <typename T>
void task_inc_potential( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* V_device,
size_t LDV,
cudaStream_t stream ) {
dim3 threads(warp_size / 2, max_warps_per_thread_block * 2, 1), blocks(1,1,ntasks);
const int submat_block_size = get_submat_cut_block(LDV, 0);
for (int i = 0; i < util::div_ceil(LDV, submat_block_size); i++) {
for (int j = 0; j < util::div_ceil(LDV, submat_block_size); j++) {
inc_by_submat_combined_kernel<<< blocks, threads, 0, stream >>>(
ntasks, device_tasks, V_device, LDV, i, j
);
}
}
}
template
void task_inc_potential( size_t ntasks,
XCTaskDevice<double>* device_tasks,
double* V_device,
size_t LDV,
cudaStream_t stream );
template <typename T>
__global__ void symmetrize_matrix_device( size_t nbf, size_t LDA, T* A ) {
const size_t block_size = warp_size;
__shared__ T buffer[block_size][block_size+1]; // Pad shared memory to resolve shared memory bank conflicts
const size_t num_blocks = ((nbf + block_size - 1) / block_size);
for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) {
// TODO This could be load balanced if need be
const int i_coord = i * block_size;
for (int j = i; j < num_blocks; j++) {
const int j_coord = j * block_size;
// Read in block to buffer
// TODO These could be vector reads/writes if this becomes significant
if (i_coord + threadIdx.y < nbf && j_coord + threadIdx.x < nbf) {
buffer[threadIdx.y][threadIdx.x] = A[(i_coord + threadIdx.y) * LDA + j_coord + threadIdx.x];
}
__syncthreads();
// Write buffer
if (j_coord + threadIdx.y < nbf && i_coord + threadIdx.x < nbf) {
if ((j_coord != i_coord || threadIdx.x < threadIdx.y)) { // handles the diagonal block
A[(j_coord + threadIdx.y) * LDA + i_coord + threadIdx.x] = buffer[threadIdx.x][threadIdx.y];
}
}
__syncthreads();
}
}
}
template <typename T>
void symmetrize_matrix( size_t nbf, size_t LDV, T* V_device, cudaStream_t stream) {
const size_t num_blocks = ((LDV + warp_size - 1) / warp_size);
// warp_size must equal max_warps_per_thread_block, and both must equal 32
dim3 threads(warp_size, max_warps_per_thread_block), blocks(num_blocks);
symmetrize_matrix_device<<<blocks, threads, 0, stream>>>(nbf, LDV, V_device);
}
template
void symmetrize_matrix( size_t nbf, size_t LDV, double* V_device, cudaStream_t stream );
}
}
}
|
00d967abe0d4c30fa6a351770885839ea4601b73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hash_table.h"
template<int kd, int vd>
void createHashTable(int capacity) {
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_capacity,
&capacity,
sizeof(unsigned int)));
float *values;
cu_malloc((void**)&values, capacity*vd*sizeof(float));
CUDA_SAFE_CALL(hipMemset((void *)values, 0, capacity*vd*sizeof(float)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_values,
&values,
sizeof(float *)));
int *entries;
cu_malloc((void **)&entries, capacity*2*sizeof(int));
CUDA_SAFE_CALL(hipMemset((void *)entries, -1, capacity*2*sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_entries,
&entries,
sizeof(unsigned int *)));
#ifdef LINEAR_D_MEMORY
char *ranks;
allocateCudaMemory((void**)&ranks, capacity*sizeof(char));
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_rank,
&ranks,
sizeof(char *)));
signed short *zeros;
allocateCudaMemory((void**)&zeros, capacity*sizeof(signed short));
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_zeros,
&zeros,
sizeof(char *)));
#else
signed short *keys;
cu_malloc((void **)&keys, capacity*kd*sizeof(signed short));
CUDA_SAFE_CALL(hipMemset((void *)keys, 0, capacity*kd*sizeof(signed short)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(table_keys,
&keys,
sizeof(unsigned int *)));
#endif
}
template <int vd> static void resetHashTable() {
CUDA_SAFE_CALL(hipMemset((void*)table_values, 0, table_capacity*vd*sizeof(float)));
}
void destroyHashTable() {
#ifndef LINEAR_D_MEMORY
cu_err_chk(hipFree(table_keys));
#endif
cu_err_chk(hipFree(table_values));
cu_err_chk(hipFree(table_entries));
}
template<int kd> __device__ __host__ static unsigned int hash(signed short *key) {
unsigned int k = 0;
for (int i = 0; i < kd; i++) {
k += key[i];
k = k * 2531011;
}
return k;
}
template<int kd> __device__ __host__ static unsigned int hash(int *key) {
unsigned int k = 0;
for (int i = 0; i < kd; i++) {
k += key[i];
k = k * 2531011;
}
return k;
}
template<int d> __device__ bool matchKey(int idx, signed short * key) {
bool match = true;
int slot = idx/(d+1), color = idx-slot*(d+1);
char *rank = table_rank + slot * (d+1);
signed short *zero = table_zeros + slot * (d+1);
for (int i = 0; i < d && match; i++) {
match = (key[i] == zero[i] + color - (rank[i] > d-color ? (d+1) : 0));
}
return match;
}
template<int d> __device__ static void generateKey(int idx, signed short * key) {
int slot = idx/(d+1), color = idx-slot*(d+1);
char *rank = table_rank + slot * (d+1);
signed short *zero = table_zeros + slot * (d+1);
for (int i = 0; i < d; i++) {
key[i] = zero[i] + color - (rank[i] > d-color ? (d+1) : 0);
}
}
float* swapHashTableValues(float *newValues) {
float * oldValues;
cu_err_chk(hipMemcpyFromSymbol(&oldValues,
table_values,
sizeof(float *)));
cu_err_chk(hipMemcpyToSymbol(table_values,
&newValues,
sizeof(float *)));
return oldValues;
}
template<int kd>
__device__ int hashTableInsert(unsigned int fh, signed short *key, unsigned int slot) {
int h = modHash(fh);
while (1) {
int *e = &table_entries[h];
// If the cell is empty (-1), lock it (-2)
int contents = atomicCAS(e, -1, -2);
if (contents == -2) {
// If it was locked already, move on to the next cell
} else if (contents == -1) {
// If it was empty, we successfully locked it. Write our key.
#ifndef LINEAR_D_MEMORY
for (int i = 0; i < kd; i++) {
table_keys[slot*kd+i] = key[i];
}
#endif
// Unlock
atomicExch(e, slot);
return h;
} else {
// The cell is unlocked and has a key in it, check if it matches
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>(contents, key)) return h;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[contents*kd+i] == key[i]);
}
if (match) return h;
#endif
}
// increment the bucket with wraparound
h++;
if (h == table_capacity*2) h = 0;
}
}
template<int kd>
__device__ int hashTableInsert(signed short *key, unsigned int slot) {
unsigned int myHash = hash<kd>(key);
return hashTableInsert<kd>(myHash, key, slot);
}
template<int kd> __device__
int hashTableRetrieveWithHash(unsigned int fh, signed short *key) {
int h = modHash(fh);
while (1) {
int *e = table_entries + h;
if (*e == -1) return -1;
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>((*e), key)) return *e;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[(*e)*kd+i] == key[i]);
}
if (match) return *e;
#endif
h++;
if (h == table_capacity*2) h = 0;
}
}
template<int kd>
__device__ int hashTableRetrieve(signed short *key) {
int h = modHash(hash<kd>(key));
while (1) {
int *e = table_entries + h;
if (*e == -1) return -1;
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>((*e), key)) return *e;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[(*e)*kd+i] == key[i]);
}
if (match) return *e;
#endif
h++;
if (h == table_capacity*2) h = 0;
}
}
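#if 0
// Illustrative host-side sketch (not part of the original file): a
// single-threaded analogue of the open-addressing insert above. It assumes
// modHash reduces the hash modulo 2 * capacity, consistent with the
// wraparound at table_capacity * 2 in the device code; the device version
// replaces the plain claim of an empty slot with atomicCAS locking
// (-1 == empty, -2 == locked).
#include <vector>
struct HostTable {
  int capacity;               // same meaning as table_capacity
  std::vector<int> entries;   // 2 * capacity entries, -1 == empty
  std::vector<short> keys;    // capacity * kd packed keys
};
static int host_insert(HostTable& t, const short* key, int kd,
                       unsigned int h, int slot) {
  h %= (unsigned int)(t.capacity * 2);
  while (true) {
    int& e = t.entries[h];
    if (e == -1) {            // empty: claim the slot and store the key
      for (int i = 0; i < kd; ++i) t.keys[slot * kd + i] = key[i];
      e = slot;
      return (int)h;
    }
    bool match = true;        // occupied: accept only an exact key match
    for (int i = 0; i < kd && match; ++i) match = (t.keys[e * kd + i] == key[i]);
    if (match) return (int)h;
    if (++h == (unsigned int)(t.capacity * 2)) h = 0;  // linear probe with wraparound
  }
}
#endif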
| 00d967abe0d4c30fa6a351770885839ea4601b73.cu | #include "hash_table.h"
template<int kd, int vd>
void createHashTable(int capacity) {
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_capacity,
&capacity,
sizeof(unsigned int)));
float *values;
cu_malloc((void**)&values, capacity*vd*sizeof(float));
CUDA_SAFE_CALL(cudaMemset((void *)values, 0, capacity*vd*sizeof(float)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_values,
&values,
sizeof(float *)));
int *entries;
cu_malloc((void **)&entries, capacity*2*sizeof(int));
CUDA_SAFE_CALL(cudaMemset((void *)entries, -1, capacity*2*sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_entries,
&entries,
sizeof(unsigned int *)));
#ifdef LINEAR_D_MEMORY
char *ranks;
allocateCudaMemory((void**)&ranks, capacity*sizeof(char));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_rank,
&ranks,
sizeof(char *)));
signed short *zeros;
allocateCudaMemory((void**)&zeros, capacity*sizeof(signed short));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_zeros,
&zeros,
sizeof(char *)));
#else
signed short *keys;
cu_malloc((void **)&keys, capacity*kd*sizeof(signed short));
CUDA_SAFE_CALL(cudaMemset((void *)keys, 0, capacity*kd*sizeof(signed short)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_keys,
&keys,
sizeof(unsigned int *)));
#endif
}
template <int vd> static void resetHashTable() {
CUDA_SAFE_CALL(cudaMemset((void*)table_values, 0, table_capacity*vd*sizeof(float)));
}
void destroyHashTable() {
#ifndef LINEAR_D_MEMORY
cu_err_chk(cudaFree(table_keys));
#endif
cu_err_chk(cudaFree(table_values));
cu_err_chk(cudaFree(table_entries));
}
template<int kd> __device__ __host__ static unsigned int hash(signed short *key) {
unsigned int k = 0;
for (int i = 0; i < kd; i++) {
k += key[i];
k = k * 2531011;
}
return k;
}
template<int kd> __device__ __host__ static unsigned int hash(int *key) {
unsigned int k = 0;
for (int i = 0; i < kd; i++) {
k += key[i];
k = k * 2531011;
}
return k;
}
template<int d> __device__ bool matchKey(int idx, signed short * key) {
bool match = true;
int slot = idx/(d+1), color = idx-slot*(d+1);
char *rank = table_rank + slot * (d+1);
signed short *zero = table_zeros + slot * (d+1);
for (int i = 0; i < d && match; i++) {
match = (key[i] == zero[i] + color - (rank[i] > d-color ? (d+1) : 0));
}
return match;
}
template<int d> __device__ static void generateKey(int idx, signed short * key) {
int slot = idx/(d+1), color = idx-slot*(d+1);
char *rank = table_rank + slot * (d+1);
signed short *zero = table_zeros + slot * (d+1);
for (int i = 0; i < d; i++) {
key[i] = zero[i] + color - (rank[i] > d-color ? (d+1) : 0);
}
}
float* swapHashTableValues(float *newValues) {
float * oldValues;
cu_err_chk(cudaMemcpyFromSymbol(&oldValues,
table_values,
sizeof(float *)));
cu_err_chk(cudaMemcpyToSymbol(table_values,
&newValues,
sizeof(float *)));
return oldValues;
}
template<int kd>
__device__ int hashTableInsert(unsigned int fh, signed short *key, unsigned int slot) {
int h = modHash(fh);
while (1) {
int *e = &table_entries[h];
// If the cell is empty (-1), lock it (-2)
int contents = atomicCAS(e, -1, -2);
if (contents == -2) {
// If it was locked already, move on to the next cell
} else if (contents == -1) {
// If it was empty, we successfully locked it. Write our key.
#ifndef LINEAR_D_MEMORY
for (int i = 0; i < kd; i++) {
table_keys[slot*kd+i] = key[i];
}
#endif
// Unlock
atomicExch(e, slot);
return h;
} else {
// The cell is unlocked and has a key in it, check if it matches
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>(contents, key)) return h;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[contents*kd+i] == key[i]);
}
if (match) return h;
#endif
}
// increment the bucket with wraparound
h++;
if (h == table_capacity*2) h = 0;
}
}
template<int kd>
__device__ int hashTableInsert(signed short *key, unsigned int slot) {
unsigned int myHash = hash<kd>(key);
return hashTableInsert<kd>(myHash, key, slot);
}
template<int kd> __device__
int hashTableRetrieveWithHash(unsigned int fh, signed short *key) {
int h = modHash(fh);
while (1) {
int *e = table_entries + h;
if (*e == -1) return -1;
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>((*e), key)) return *e;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[(*e)*kd+i] == key[i]);
}
if (match) return *e;
#endif
h++;
if (h == table_capacity*2) h = 0;
}
}
template<int kd>
__device__ int hashTableRetrieve(signed short *key) {
int h = modHash(hash<kd>(key));
while (1) {
int *e = table_entries + h;
if (*e == -1) return -1;
#ifdef LINEAR_D_MEMORY
if (matchKey<kd>((*e), key)) return *e;
#else
bool match = true;
for (int i = 0; i < kd && match; i++) {
match = (table_keys[(*e)*kd+i] == key[i]);
}
if (match) return *e;
#endif
h++;
if (h == table_capacity*2) h = 0;
}
}
|
d2d232aaa4132aff835542a0b49ba1801f93f24e.hip | // !!! This is a file automatically generated by hipify!!!
//====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define fp float
#define NUMBER_THREADS 512
//#include "define.c"
#include "compress_kernel.cu"
#include "extract_kernel.hip"
#include "graphics.c"
#include "prepare_kernel.cu"
#include "reduce_kernel.hip"
#include "resize.c"
#include "srad2_kernel.cu"
#include "srad_kernel.hip"
#include "timer.c"
#include "../benchmark_common.h"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
// int main(int argc, char *argv []){
int main_SRAD(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
time0 = get_time_srad();
// inputs image, input parameters
fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// inputs image, input parameters
fp* image; // input image
int Nr, Nc; // IMAGE nbr of rows/cols/elements
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1, r2, c1, c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN, *iS, *jE, *jW;
// counters
int iter; // primary loop
long i, j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time_srad();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
/*if(argc != 5){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}*/
niter = 100;
lambda = 0.5;
Nr = 502;
Nc = 458;
time2 = get_time_srad();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics("srad/image.pgm", image_ori, image_ori_rows, image_ori_cols, 1);
time3 = get_time_srad();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr * Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize(image_ori, image_ori_rows, image_ori_cols, image, Nr, Nc, 1);
time4 = get_time_srad();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int*)malloc(mem_size_i); // north surrounding element
iS = (int*)malloc(mem_size_i); // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int*)malloc(mem_size_j); // west surrounding element
jE = (int*)malloc(mem_size_j); // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i = 0; i < Nr; i++) {
iN[i] = i - 1; // holds index of IMAGE row above
iS[i] = i + 1; // holds index of IMAGE row below
}
for (j = 0; j < Nc; j++) {
jW[j] = j - 1; // holds index of IMAGE column on the left
jE[j] = j + 1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of
// image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr - 1] = Nr - 1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc - 1] = Nc - 1; // changes IMAGE rightmost column index from Nc to Nc-1
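// [Editorial worked example, not from the original source] With Nr = 4 the
// clamped row-neighbour indices become iN = {0, 0, 1, 2} and iS = {1, 2, 3, 3},
// so boundary pixels simply reuse themselves as their missing neighbour; the
// column arrays jW/jE are clamped the same way.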
//================================================================================80
// GPU SETUP
//================================================================================80
// allocate memory for entire IMAGE on DEVICE
mem_size =
sizeof(fp) * Ne; // get the size of float representation of input IMAGE
hipMalloc((void**)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
hipMalloc((void**)&d_iN, mem_size_i); //
hipMemcpyAsync(d_iN, iN, mem_size_i, hipMemcpyHostToDevice, stream_app); //
hipMalloc((void**)&d_iS, mem_size_i); //
hipMemcpyAsync(d_iS, iS, mem_size_i, hipMemcpyHostToDevice, stream_app); //
hipMalloc((void**)&d_jE, mem_size_j); //
hipMemcpyAsync(d_jE, jE, mem_size_j, hipMemcpyHostToDevice, stream_app); //
hipMalloc((void**)&d_jW, mem_size_j); //
hipMemcpyAsync(d_jW, jW, mem_size_j, hipMemcpyHostToDevice, stream_app); //
// allocate memory for partial sums on DEVICE
hipMalloc((void**)&d_sums, mem_size); //
hipMalloc((void**)&d_sums2, mem_size); //
// allocate memory for derivatives
hipMalloc((void**)&d_dN, mem_size); //
hipMalloc((void**)&d_dS, mem_size); //
hipMalloc((void**)&d_dW, mem_size); //
hipMalloc((void**)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
hipMalloc((void**)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne / threads.x;
if (Ne % threads.x !=
0) { // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
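// [Editorial worked example, not from the original source] For the default
// 502 x 458 image, Ne = 229,916 and threads.x = 512, so Ne / 512 = 449 with a
// remainder of 28, giving blocks.x = 450 after the +1 adjustment above.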
time5 = get_time_srad();
//================================================================================80
// COPY INPUT TO GPU
//================================================================================80
hipMemcpyAsync(d_I, image, mem_size, hipMemcpyHostToDevice, stream_app);
time6 = get_time_srad();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
hipLaunchKernelGGL(( extract), dim3(blocks), dim3(threads), 0, stream_app, Ne, d_I);
checkCUDAError("extract");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
time7 = get_time_srad();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
pthread_mutex_lock(mutexapp);
// execute main loop
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
hipLaunchKernelGGL(( prepare), dim3(blocks), dim3(threads), 0, stream_app, Ne, d_I, d_sums, d_sums2);
checkCUDAError("prepare");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while (blocks2.x != 0) {
checkCUDAError("before reduce");
pthread_mutex_lock(mutexapp);
// run kernel
hipLaunchKernelGGL(( reduce), dim3(blocks2), dim3(threads), 0, stream_app, Ne, no, mul, d_sums, d_sums2);
checkCUDAError("reduce");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
// update execution parameters
no = blocks2.x; // get current number of elements
if (blocks2.x == 1) {
blocks2.x = 0;
} else {
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x / threads.x; // number of blocks
if (blocks2.x % threads.x !=
0) { // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
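// [Editorial worked example, not from the original source] For Ne = 229,916
// and 512 threads per block this loop runs twice: pass 1 reduces the 229,916
// partial sums with 450 blocks (no = 229,916, mul = 1), pass 2 reduces the 450
// per-block results with a single block (no = 450, mul = 512), after which the
// grand totals sit at the front of d_sums/d_sums2 and are copied back below.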
checkCUDAError("before copy sum");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
// copy total sums to device
mem_size_single = sizeof(fp) * 1;
hipMemcpyAsync(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost,
stream_app);
hipMemcpyAsync(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost,
stream_app);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI
q0sqr = varROI / meanROI2; // gets normalized variance (squared coefficient of variation) of ROI
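// [Editorial worked example, not from the original source] E.g. if the ROI has
// meanROI = 0.5 and total2 / NeROI = 0.26, then varROI = 0.26 - 0.25 = 0.01
// and q0sqr = 0.01 / 0.25 = 0.04; q0sqr is the speckle-scale estimate fed to
// the srad kernel below.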
pthread_mutex_lock(mutexapp);
// execute srad kernel
hipLaunchKernelGGL(( srad), dim3(blocks), dim3(threads), 0, stream_app,
lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
q0sqr, // standard deviation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
hipLaunchKernelGGL(( srad2), dim3(blocks), dim3(threads), 0, stream_app,
lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time_srad();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
hipLaunchKernelGGL(( compress), dim3(blocks), dim3(threads), 0, stream_app, Ne, d_I);
checkCUDAError("compress");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
time9 = get_time_srad();
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
hipMemcpyAsync(image, d_I, mem_size, hipMemcpyDeviceToHost, stream_app);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
checkCUDAError("copy back");
time10 = get_time_srad();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);
time11 = get_time_srad();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
hipFree(d_I);
hipFree(d_c);
hipFree(d_iN);
hipFree(d_iS);
hipFree(d_jE);
hipFree(d_jW);
hipFree(d_dN);
hipFree(d_dS);
hipFree(d_dE);
hipFree(d_dW);
hipFree(d_sums);
hipFree(d_sums2);
time12 = get_time_srad();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n",
(float)(time1 - time0) / 1000000,
(float)(time1 - time0) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n",
(float)(time2 - time1) / 1000000,
(float)(time2 - time1) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n",
(float)(time3 - time2) / 1000000,
(float)(time3 - time2) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n",
(float)(time4 - time3) / 1000000,
(float)(time4 - time3) / (float)(time12 - time0) * 100);
printf(
"%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY "
"ALLOCATION\n",
(float)(time5 - time4) / 1000000,
(float)(time5 - time4) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n",
(float)(time6 - time5) / 1000000,
(float)(time6 - time5) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n",
(float)(time7 - time6) / 1000000,
(float)(time7 - time6) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float)(time8 - time7) / 1000000,
(float)(time8 - time7) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n",
(float)(time9 - time8) / 1000000,
(float)(time9 - time8) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n",
(float)(time10 - time9) / 1000000,
(float)(time10 - time9) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n",
(float)(time11 - time10) / 1000000,
(float)(time11 - time10) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n",
(float)(time12 - time11) / 1000000,
(float)(time12 - time11) / (float)(time12 - time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float)(time12 - time0) / 1000000);
return 0;
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
| d2d232aaa4132aff835542a0b49ba1801f93f24e.cu | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficient saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <cuda.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define fp float
#define NUMBER_THREADS 512
//#include "define.c"
#include "compress_kernel.cu"
#include "extract_kernel.cu"
#include "graphics.c"
#include "prepare_kernel.cu"
#include "reduce_kernel.cu"
#include "resize.c"
#include "srad2_kernel.cu"
#include "srad_kernel.cu"
#include "timer.c"
#include "../benchmark_common.h"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
// int main(int argc, char *argv []){
int main_SRAD(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
time0 = get_time_srad();
// inputs: image, input parameters
fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// inputs: image, input parameters
fp* image; // input image
int Nr, Nc; // IMAGE nbr of rows/cols/elements
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1, r2, c1, c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN, *iS, *jE, *jW;
// counters
int iter; // primary loop
long i, j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time_srad();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
/*if(argc != 5){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}*/
niter = 100;
lambda = 0.5;
Nr = 502;
Nc = 458;
time2 = get_time_srad();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics("srad/image.pgm", image_ori, image_ori_rows, image_ori_cols, 1);
time3 = get_time_srad();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr * Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize(image_ori, image_ori_rows, image_ori_cols, image, Nr, Nc, 1);
time4 = get_time_srad();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI, ROI size
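// Note: with r1 = 0, r2 = Nr-1, c1 = 0, c2 = Nc-1 the ROI spans the whole
// image, so NeROI = Nr*Nc = Ne (229,916 for the default 502x458 input) and
// the statistics computed later are effectively global image statistics.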
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int*)malloc(mem_size_i); // north surrounding element
iS = (int*)malloc(mem_size_i); // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int*)malloc(mem_size_j); // west surrounding element
jE = (int*)malloc(mem_size_j); // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i = 0; i < Nr; i++) {
iN[i] = i - 1; // holds index of IMAGE row above
iS[i] = i + 1; // holds index of IMAGE row below
}
for (j = 0; j < Nc; j++) {
jW[j] = j - 1; // holds index of IMAGE column on the left
jE[j] = j + 1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of
// image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr - 1] = Nr - 1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc - 1] = Nc - 1; // changes IMAGE rightmost column index from Nc to Nc-1
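// Example: without the fix-up above, iN[0] would be -1 and iS[Nr-1] would
// be Nr, i.e. out of bounds; clamping them to 0 and Nr-1 (and likewise for
// jW/jE) makes every border pixel use its own row/column as the missing
// neighbor, which amounts to replicate (zero-flux) boundary handling.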
//================================================================================80
// GPU SETUP
//================================================================================80
// allocate memory for entire IMAGE on DEVICE
mem_size =
sizeof(fp) * Ne; // get the size of float representation of input IMAGE
cudaMalloc((void**)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
cudaMalloc((void**)&d_iN, mem_size_i); //
cudaMemcpyAsync(d_iN, iN, mem_size_i, cudaMemcpyHostToDevice, stream_app); //
cudaMalloc((void**)&d_iS, mem_size_i); //
cudaMemcpyAsync(d_iS, iS, mem_size_i, cudaMemcpyHostToDevice, stream_app); //
cudaMalloc((void**)&d_jE, mem_size_j); //
cudaMemcpyAsync(d_jE, jE, mem_size_j, cudaMemcpyHostToDevice, stream_app); //
cudaMalloc((void**)&d_jW, mem_size_j); //
cudaMemcpyAsync(d_jW, jW, mem_size_j, cudaMemcpyHostToDevice, stream_app); //
// allocate memory for partial sums on DEVICE
cudaMalloc((void**)&d_sums, mem_size); //
cudaMalloc((void**)&d_sums2, mem_size); //
// allocate memory for derivatives
cudaMalloc((void**)&d_dN, mem_size); //
cudaMalloc((void**)&d_dS, mem_size); //
cudaMalloc((void**)&d_dW, mem_size); //
cudaMalloc((void**)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
cudaMalloc((void**)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne / threads.x;
if (Ne % threads.x !=
0) { // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
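// Example of the ceil-division above: with the default Ne = 502*458 =
// 229,916 elements and 512 threads per block, Ne/threads.x = 449 with a
// remainder, so one extra block is added and the grid is 450x1 blocks.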
time5 = get_time_srad();
//================================================================================80
// COPY INPUT TO GPU
//================================================================================80
cudaMemcpyAsync(d_I, image, mem_size, cudaMemcpyHostToDevice, stream_app);
time6 = get_time_srad();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
extract<<<blocks, threads, 0, stream_app>>>(Ne, d_I);
checkCUDAError("extract");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
time7 = get_time_srad();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
pthread_mutex_lock(mutexapp);
// execute main loop
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
prepare<<<blocks, threads, 0, stream_app>>>(Ne, d_I, d_sums, d_sums2);
checkCUDAError("prepare");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while (blocks2.x != 0) {
checkCUDAError("before reduce");
pthread_mutex_lock(mutexapp);
// run kernel
reduce<<<blocks2, threads, 0, stream_app>>>(Ne, no, mul, d_sums, d_sums2);
checkCUDAError("reduce");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
// update execution parameters
no = blocks2.x; // get current number of elements
if (blocks2.x == 1) {
blocks2.x = 0;
} else {
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x / threads.x; // number of blocks
if (blocks2.x % threads.x !=
0) { // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
checkCUDAError("before copy sum");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
// copy the two partial-sum results from device to host
mem_size_single = sizeof(fp) * 1;
cudaMemcpyAsync(&total, d_sums, mem_size_single, cudaMemcpyDeviceToHost,
stream_app);
cudaMemcpyAsync(&total2, d_sums2, mem_size_single, cudaMemcpyDeviceToHost,
stream_app);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // mean (average) value of the elements in the ROI
meanROI2 = meanROI * meanROI; // mean squared
varROI = (total2 / fp(NeROI)) - meanROI2; // variance of the ROI
q0sqr = varROI / meanROI2; // squared coefficient of variation of the ROI
pthread_mutex_lock(mutexapp);
// execute srad kernel
srad<<<blocks, threads, 0, stream_app>>>(
lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
q0sqr, // squared coefficient of variation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
srad2<<<blocks, threads, 0, stream_app>>>(
lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time_srad();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
compress<<<blocks, threads, 0, stream_app>>>(Ne, d_I);
checkCUDAError("compress");
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
time9 = get_time_srad();
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
cudaMemcpyAsync(image, d_I, mem_size, cudaMemcpyDeviceToHost, stream_app);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
checkCUDAError("copy back");
time10 = get_time_srad();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);
time11 = get_time_srad();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
cudaFree(d_I);
cudaFree(d_c);
cudaFree(d_iN);
cudaFree(d_iS);
cudaFree(d_jE);
cudaFree(d_jW);
cudaFree(d_dN);
cudaFree(d_dS);
cudaFree(d_dE);
cudaFree(d_dW);
cudaFree(d_sums);
cudaFree(d_sums2);
time12 = get_time_srad();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n",
(float)(time1 - time0) / 1000000,
(float)(time1 - time0) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n",
(float)(time2 - time1) / 1000000,
(float)(time2 - time1) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n",
(float)(time3 - time2) / 1000000,
(float)(time3 - time2) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n",
(float)(time4 - time3) / 1000000,
(float)(time4 - time3) / (float)(time12 - time0) * 100);
printf(
"%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY "
"ALLOCATION\n",
(float)(time5 - time4) / 1000000,
(float)(time5 - time4) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n",
(float)(time6 - time5) / 1000000,
(float)(time6 - time5) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n",
(float)(time7 - time6) / 1000000,
(float)(time7 - time6) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float)(time8 - time7) / 1000000,
(float)(time8 - time7) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n",
(float)(time9 - time8) / 1000000,
(float)(time9 - time8) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n",
(float)(time10 - time9) / 1000000,
(float)(time10 - time9) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n",
(float)(time11 - time10) / 1000000,
(float)(time11 - time10) / (float)(time12 - time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n",
(float)(time12 - time11) / 1000000,
(float)(time12 - time11) / (float)(time12 - time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float)(time12 - time0) / 1000000);
return 0;
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
|
e24dc3a3e21ef23c4d490067a36dcb7412c32c14.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear in
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a host-only implementation of a half-precision floating-point type. It requires no
specialized hardware support from the CPU and emulates arithmetic operations. Device-side code
should use CUDA's `half` type.
cutlass::HostMatrix<>
This template class simplifies the creation of a rank=2 tensor with either a column-major or
row-major layout in memory.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorInitialize()
This template function initializes the elements of a tensor according to either a procedural
definition or a random distribution. The function in namespace `cutlass::reference::device::`
uses a CUDA kernel to perform this initialization, relying on CURAND to compute random numbers.
cutlass::reference::host::Gemm()
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
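//
// For example (illustrative only -- see TestCutlassGemm() below for the full
// sequence), allocating an operand, initializing it on the device and pulling
// it back for host-side checking looks like:
//
// cutlass::HostMatrix<cutlass::half_t> A(cutlass::MatrixCoord(M, K));
// cutlass::reference::device::TensorInitialize(A.device_view(), seed, dist);
// A.sync_host(); // device-to-host copy for verification
//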
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS includes needed for mixed-precision GEMM kernel
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/fp16_sgemm_traits.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "tools/util/tensor_view_io.h"
// Defines cutlass::HostMatrix<>
#include "tools/util/host_matrix.h"
// Defines cutlass::half_t
#include "tools/util/half.h"
// Defines cutlass::reference::device::TensorInitialize()
#include "tools/util/reference/device/tensor_elementwise.h"
// Defines cutlass::reference::host::TensorEquals()
#include "tools/util/reference/host/tensor_elementwise.h"
// Defines cutlass::reference::host::Gemm()
#include "tools/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
hipError_t Cutlass_FP16_SgemmNN(
int M,
int N,
int K,
cutlass::half_t alpha,
half const *A,
int lda,
half const *B,
int ldb,
cutlass::half_t beta,
half *C,
int ldc) {
// Define a CUTLASS Gemm using mixed-precision floating-point.
//
// A, B, C, D are half-precision. Internal accumulation is in single-precision.
//
// Note, we use CUDA's `half` type for device-side code including CUTLASS GEMM kernels.
//
typedef cutlass::gemm::Fp16SgemmSgemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<16, 128, 128>,
half, // A type
half, // B type
half, // C type
half, // D type
half // Scalar type: alpha, beta
>
GemmTraits;
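// The traits above fix the problem configuration at compile time: both A and
// B are column-major, the Shape<> parameter selects the threadblock tile,
// and A/B/C/D as well as alpha/beta are half precision, while the "Sgemm"
// part of Fp16SgemmSgemmTraits keeps the internal accumulation in single
// precision (as noted in the comment at the top of this function).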
// Define a CUTLASS GEMM object.
typedef cutlass::gemm::Gemm<GemmTraits> Gemm;
// Construct and initialize CUTLASS GEMM parameters object.
typename Gemm::Params params;
int result = params.initialize(
M, // GEMM M dimension
N, // GEMM N dimension
K, // GEMM K dimension
reinterpret_cast<half const &>(alpha), // scalar alpha
A, // matrix A operand
lda,
B, // matrix B operand
ldb,
reinterpret_cast<half const &>(beta), // scalar beta
C, // source matrix C
ldc,
C, // destination matrix C (may be different memory than source C matrix)
ldc
);
if (result) {
std::cerr << "Failed to initialize CUTLASS Gemm::Params object." << std::endl;
return hipErrorInvalidValue;
}
// Launch the CUTLASS GEMM kernel.
Gemm::launch(params);
// Return any errors associated with the launch or hipSuccess if no error.
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
hipError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
hipError_t result;
//
// Construct cutlass::HostMatrix<> using the half-precision host-side type.
//
// cutlass::HostMatrix<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
cutlass::Distribution dist;
// Uniform random distribution from -4 .. 4. Values are truncated to integers.
dist.set_uniform(-4, 4);
// Arbitrary RNG seed value. Hard-coded for deterministic results.
int seed = 2080;
cutlass::reference::device::TensorInitialize(
A.device_view(), // concept: TensorView
seed,
dist);
cutlass::reference::device::TensorInitialize(
B.device_view(), // concept: TensorView
seed * 2,
dist);
cutlass::reference::device::TensorInitialize(
C_cutlass.device_view(), // concept: TensorView
seed * 3,
dist);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::reference::device::TensorFill(C_reference.device_view(), C_cutlass.device_view());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = Cutlass_FP16_SgemmNN(
M,
N,
K,
alpha,
A.device_data(),
A.leading_dim(),
B.device_data(),
B.leading_dim(),
beta,
C_cutlass.device_data(),
C_cutlass.leading_dim()
);
if (result != hipSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm(
cutlass::gemm::GemmCoord(K, N, M), // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (concept: TensorRef)
B.host_ref(), // B (concept: TensorRef)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref(), // C (concept: TensorRef)
float(0) // Accumulator initial value passed as argument to deduce
); // internal accumulation data type as float.
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(C_reference.host_view(), C_cutlass.host_view())) {
std::cerr << "Error - CUTLASS mixed-precision GEMM kernel differs from reference." << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
// Result of CUTLASS mixed-precision GEMM kernel
std::cerr << "CUTLASS:\n" << C_cutlass << std::endl;
// Result of reference computation
std::cerr << "Reference:\n" << C_reference << std::endl;
// Return error code.
return hipErrorUnknown;
}
// Passed error check
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
cutlass::half_t scalars[2] = { 1, 0 };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
hipError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| e24dc3a3e21ef23c4d490067a36dcb7412c32c14.cu | /***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear in
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a host-only implementation of a half-precision floating-point type. It requires no
specialized hardware support from the CPU and emulates arithmetic operations. Device-side code
should use CUDA's `half` type.
cutlass::HostMatrix<>
This template class simplifies the creation of a rank=2 tensor with either a column-major or
row-major layout in memory.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorInitialize()
This template function initializes the elements of a tensor according to either a procedural
definition or a random distribution. The function in namespace `cutlass::reference::device::`
uses a CUDA kernel to perform this initialization, relying on CURAND to compute random numbers.
cutlass::reference::host::Gemm()
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS includes needed for mixed-precision GEMM kernel
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/fp16_sgemm_traits.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "tools/util/tensor_view_io.h"
// Defines cutlass::HostMatrix<>
#include "tools/util/host_matrix.h"
// Defines cutlass::half_t
#include "tools/util/half.h"
// Defines cutlass::reference::device::TensorInitialize()
#include "tools/util/reference/device/tensor_elementwise.h"
// Defines cutlass::reference::host::TensorEquals()
#include "tools/util/reference/host/tensor_elementwise.h"
// Defines cutlass::reference::host::Gemm()
#include "tools/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t Cutlass_FP16_SgemmNN(
int M,
int N,
int K,
cutlass::half_t alpha,
half const *A,
int lda,
half const *B,
int ldb,
cutlass::half_t beta,
half *C,
int ldc) {
// Define a CUTLASS Gemm using mixed-precision floating-point.
//
// A, B, C, D are half-precision. Internal accumulation is in single-precision.
//
// Note, we use CUDA's `half` type for device-side code including CUTLASS GEMM kernels.
//
typedef cutlass::gemm::Fp16SgemmSgemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<16, 128, 128>,
half, // A type
half, // B type
half, // C type
half, // D type
half // Scalar type: alpha, beta
>
GemmTraits;
// Define a CUTLASS GEMM object.
typedef cutlass::gemm::Gemm<GemmTraits> Gemm;
// Construct and initialize CUTLASS GEMM parameters object.
typename Gemm::Params params;
int result = params.initialize(
M, // GEMM M dimension
N, // GEMM N dimension
K, // GEMM K dimension
reinterpret_cast<half const &>(alpha), // scalar alpha
A, // matrix A operand
lda,
B, // matrix B operand
ldb,
reinterpret_cast<half const &>(beta), // scalar beta
C, // source matrix C
ldc,
C, // destination matrix C (may be different memory than source C matrix)
ldc
);
if (result) {
std::cerr << "Failed to initialize CUTLASS Gemm::Params object." << std::endl;
return cudaErrorInvalidValue;
}
// Launch the CUTLASS GEMM kernel.
Gemm::launch(params);
// Return any errors associated with the launch or cudaSuccess if no error.
return cudaGetLastError();
}
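// Usage sketch (see TestCutlassGemm() below, which is where this wrapper is
// actually called): the caller passes device pointers and column-major
// leading dimensions, e.g.
//
// Cutlass_FP16_SgemmNN(M, N, K, alpha,
// A.device_data(), A.leading_dim(),
// B.device_data(), B.leading_dim(),
// beta,
// C_cutlass.device_data(), C_cutlass.leading_dim());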
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
cudaError_t result;
//
// Construct cutlass::HostMatrix<> using the half-precision host-side type.
//
// cutlass::HostMatrix<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostMatrix<cutlass::half_t> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
cutlass::Distribution dist;
// Uniform random distribution from -4 .. 4. Values are truncated to integers.
dist.set_uniform(-4, 4);
// Arbitrary RNG seed value. Hard-coded for deterministic results.
int seed = 2080;
cutlass::reference::device::TensorInitialize(
A.device_view(), // concept: TensorView
seed,
dist);
cutlass::reference::device::TensorInitialize(
B.device_view(), // concept: TensorView
seed * 2,
dist);
cutlass::reference::device::TensorInitialize(
C_cutlass.device_view(), // concept: TensorView
seed * 3,
dist);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::reference::device::TensorFill(C_reference.device_view(), C_cutlass.device_view());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = Cutlass_FP16_SgemmNN(
M,
N,
K,
alpha,
A.device_data(),
A.leading_dim(),
B.device_data(),
B.leading_dim(),
beta,
C_cutlass.device_data(),
C_cutlass.leading_dim()
);
if (result != cudaSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm(
cutlass::gemm::GemmCoord(K, N, M), // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (concept: TensorRef)
B.host_ref(), // B (concept: TensorRef)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref(), // C (concept: TensorRef)
float(0) // Accumulator initial value passed as argument to deduce
); // internal accumulation data type as float.
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(C_reference.host_view(), C_cutlass.host_view())) {
std::cerr << "Error - CUTLASS mixed-precision GEMM kernel differs from reference." << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
// Result of CUTLASS mixed-precision GEMM kernel
std::cerr << "CUTLASS:\n" << C_cutlass << std::endl;
// Result of reference computation
std::cerr << "Reference:\n" << C_reference << std::endl;
// Return error code.
return cudaErrorUnknown;
}
// Passed error check
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
cutlass::half_t scalars[2] = { 1, 0 };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
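// With no arguments this runs the default 128x128x128 problem with
// alpha = 1 and beta = 0; a custom problem could be run as, e.g.,
// 01_cutlass_utilities 256 512 64 2 1
// (the binary name depends on how the example is built).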
//
// Run the CUTLASS GEMM test.
//
cudaError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
ddbb4634e01230f68725cd8b16663bb11b870097.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright [2016] <Contributors>
* \file Correlation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
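// CUDA_KERNEL_LOOP is the usual grid-stride loop: each thread starts at its
// global index and then strides by the total number of launched threads
// (blockDim.x * gridDim.x) until i reaches n, so a grid of fixed size can
// cover an arbitrary number of elements. For example, with 512 threads per
// block and 64 blocks, thread 0 would process i = 0, 32768, 65536, ...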
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper-left) position of the kernel's upper-left corner for the
// current center position of the neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load the 3D patch into shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
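// Work decomposition of CorrelateData: one thread block per output position
// (blockIdx.x, blockIdx.y) and per batch item (blockIdx.z); the 32 threads of
// the block split the channel dimension, cache the image-1 patch in shared
// memory, and thread 0 serially adds up the per-thread partial sums. The
// output channel encodes the 2D displacement: e.g. with
// neighborhood_grid_width = 9 and neighborhood_grid_radius = 4, top_channel 0
// corresponds to the displacement (-4*stride2, -4*stride2).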
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
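// Worked example of the round-off trick (illustrative values): with
// stride1 = 2 and (l - max_displacement) = -3, plain integer division
// -3 / 2 truncates towards zero and gives -1, but
// (-3 + round_off_s1) / stride1 - round_off = (99997 / 2) - 50000 = -2,
// which is the required floor(-1.5). The "+ round_off_s1 - 1 ... + 1"
// variant used for xmin/ymin produces the matching ceil.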
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0]; // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \
* bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \
* bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\
+ (l+s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight, int bottomchannels,
int bottomcount, int pad_size, const Dtype *bottom0,
const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\
* bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
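// Illustrative index mapping for blob_rearrange_kernel2 (assumed sizes:
// width = 4, height = 3, padding = 1, so widthheight = 12, pwidthheight = 30).
// The flat position xy = 5 is (x = 1, y = 1); then xpad = 2, ypad = 2 and
// xypad = 2 * (4 + 2) + 2 = 14, so in[(n*channels + ch)*12 + 5] is written to
// out[(n*30 + 14)*channels + ch]. Only interior cells are written, i.e. the
// padded border of `out` is assumed to have been zero-initialised by the caller.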
template <typename Dtype>
void Forward_gpu(
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_, int top_width_, int pad_size_,
bool is_multiply, int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_, hipStream_t stream,
hipStream_t stream_tmp1, hipStream_t stream_tmp2) {
const Dtype *bottom_data1 = data1.dptr_;
const Dtype *bottom_data2 = data2.dptr_;
Dtype *rbot1 = tmp1.dptr_;
Dtype *rbot2 = tmp2.dptr_;
Dtype *top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1,
bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2,
bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
hipLaunchKernelGGL(( CorrelateData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock),
shared_memory_per_block * sizeof(Dtype), stream,
topThreadCount,
num, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size_,
stride1_, stride2_,
width, height, channels,
rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\
/ kMaxThreadsPerBlock;
hipLaunchKernelGGL(( CorrelateDataSubtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream,
topThreadCount,
num, n, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, width, height, channels, rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
}
}
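// Launch-geometry note for the multiplicative path above: CorrelateData runs
// one block per top-blob pixel and image (grid = top_width_ x top_height_ x num)
// with THREADS_PER_WARP * WARPS_PER_BLOCK = 32 threads striding over channels,
// and requests kernel_size_^2 * bchannels * sizeof(Dtype) bytes of dynamic
// shared memory. As a rough, assumed example, kernel_size_ = 1 with
// bchannels = 256 in single precision needs only 1 KiB per block, well under
// the usual 48 KiB per-block limit.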
template <typename Dtype>
void Backward_gpu(
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_,
hipStream_t stream0, hipStream_t stream1,
int num, int channels, int height, int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = \
(static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\
/ static_cast<float>(stride1_))) + 1) * top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward0<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot2, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward1<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
hipLaunchKernelGGL(( CorrelateDataBackward0Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot1, rbot2, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
hipLaunchKernelGGL(( CorrelateDataBackward1Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, rbot2, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
}
}
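// Note on the backward launches above: gridSize is the usual
// ceil(botThreadCount / kMaxThreadsPerBlock) 1-D grid over all bottom elements
// of one image, processed once per image n. buffer_size_backw0 is computed but
// never used in this function.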
} // namespace cuda
template<typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_
) {
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_,
top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_,
neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, stream, stream_tmp1, stream_tmp2);
}
template<typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_,
int stride2_, int num, int channels, int height, int width
) {
hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_,
top_height_, top_width_, pad_size_, is_multiply,
max_displacement_, kernel_size_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, stride1_, stride2_,
stream0, stream1, num, channels, height, width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(CorrelationParam param) {
return new CorrelationOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| ddbb4634e01230f68725cd8b16663bb11b870097.cu | /*!
* Copyright [2016] <Contributors>
* \file Correation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
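// CUDA_KERNEL_LOOP is the standard grid-stride loop idiom: a kernel using it
// stays correct for any grid size. As an assumed example, a <<<64, 1024>>>
// launch (65536 threads) over n = 100000 elements lets each thread handle
// index and index + 65536, so every element is still visited exactly once.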
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
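// Work distribution in CorrelateData: blockIdx.x/y pick the output pixel,
// blockIdx.z the image, and the 32 threads of a block stride over the channel
// dimension (ch_off = threadIdx.x), accumulating partial dot products into
// sum[ch_off]. Thread 0 then reduces the 32 partials serially and writes one
// top element per top_channel; a tree reduction could be used here, but the
// serial sum over 32 values keeps the code simple.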
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
    // We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
    / stride1 + 1 - round_off;  // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
          Dtype bot0tmp = bottom0[idxbot0];  // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \
* bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \
* bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
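// Channel <-> displacement mapping used by both forward kernels, shown with
// assumed parameters neighborhood_grid_radius = 4 and stride2 = 1 (grid width
// 9, topchannels = 81): channel c corresponds to the displacement
//   o = c % 9 - 4,  p = c / 9 - 4,
// so c = 0 is (-4,-4), c = 40 is the centre (0,0) and c = 80 is (+4,+4).
// CorrelateDataSubtract uses the same mapping but accumulates the L1 distance
// fabsf(bottom0 - bottom1) instead of a product.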
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
    // We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
    / stride1 + 1 - round_off;  // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\
+ (l+s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight, int bottomchannels,
int bottomcount, int pad_size, const Dtype *bottom0,
const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\
* bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
template <typename Dtype>
void Forward_gpu(
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_, int top_width_, int pad_size_,
bool is_multiply, int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream,
cudaStream_t stream_tmp1, cudaStream_t stream_tmp2) {
const Dtype *bottom_data1 = data1.dptr_;
const Dtype *bottom_data2 = data2.dptr_;
Dtype *rbot1 = tmp1.dptr_;
Dtype *rbot2 = tmp2.dptr_;
Dtype *top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>>
(bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>>
(bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
CorrelateData<Dtype><<<totalBlocksCorr, threadsPerBlock,
shared_memory_per_block * sizeof(Dtype), stream>>>(
topThreadCount,
num, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size_,
stride1_, stride2_,
width, height, channels,
rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\
/ kMaxThreadsPerBlock;
CorrelateDataSubtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream>>>(
topThreadCount,
num, n, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, width, height, channels, rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
}
}
template <typename Dtype>
void Backward_gpu(
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_,
cudaStream_t stream0, cudaStream_t stream1,
int num, int channels, int height, int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = \
(static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\
/ static_cast<float>(stride1_))) + 1) * top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
CorrelateDataBackward0<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot2, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
CorrelateDataBackward1<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
CorrelateDataBackward0Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot1, rbot2, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
CorrelateDataBackward1Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, rbot2, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
}
}
} // namespace cuda
template<typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_
) {
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_,
top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_,
neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, stream, stream_tmp1, stream_tmp2);
}
template<typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_,
int stride2_, int num, int channels, int height, int width
) {
cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_,
top_height_, top_width_, pad_size_, is_multiply,
max_displacement_, kernel_size_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, stride1_, stride2_,
stream0, stream1, num, channels, height, width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(CorrelationParam param) {
return new CorrelationOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
d16e045b056a3c441a2eb6c17d24b284a205531a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "updateParamsKernels.cu.h"
#define BLOCK_DIM 8
void GPUimplicitX(PrivGlobs& globs, REAL* alist, REAL* blist, REAL* clist, const unsigned g){
unsigned int block_dim = BLOCK_DIM;
unsigned int numZ = max(globs.numX, globs.numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim));
REAL* d_myVarX, *d_myDxx, *d_alist, *d_blist, *d_clist;
hipMalloc((void**)&d_myVarX, globs.numX*globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myDxx, globs.numX*4*sizeof(REAL));
hipMalloc((void**)&d_alist, numZ*numZ*sizeof(REAL));
hipMalloc((void**)&d_blist, numZ*numZ*sizeof(REAL));
hipMalloc((void**)&d_clist, numZ*numZ*sizeof(REAL));
hipMemcpy(d_myVarX, globs.myVarX, globs.numX*globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myDxx, globs.myDxx, globs.numX*4*sizeof(REAL), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( implicitX), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, globs.numX, globs.numY, dtInv, d_myVarX, d_myDxx,
d_alist, d_blist, d_clist);
hipMemcpy(alist, d_alist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(blist, d_blist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(clist, d_clist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipFree(d_myVarX);
hipFree(d_myDxx);
hipFree(d_alist);
hipFree(d_blist);
hipFree(d_clist);
}
void GPUimplicitY(PrivGlobs& globs, REAL* alist, REAL* blist, REAL* clist, const unsigned g){
unsigned int block_dim = BLOCK_DIM;
unsigned int numZ = max(globs.numX, globs.numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim));
REAL* d_myVarY, *d_myDyy, *d_alist, *d_blist, *d_clist;
hipMalloc((void**)&d_myVarY, globs.numX*globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myDyy, globs.numY*4*sizeof(REAL));
hipMalloc((void**)&d_alist, numZ*numZ*sizeof(REAL));
hipMalloc((void**)&d_blist, numZ*numZ*sizeof(REAL));
hipMalloc((void**)&d_clist, numZ*numZ*sizeof(REAL));
hipMemcpy(d_myVarY, globs.myVarY, globs.numX*globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myDyy, globs.myDyy, globs.numY*4*sizeof(REAL), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( implicitY), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, globs.numX, globs.numY, dtInv, d_myVarY, d_myDyy,
d_alist, d_blist, d_clist);
hipMemcpy(alist, d_alist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(blist, d_blist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(clist, d_clist, numZ*numZ*sizeof(REAL), hipMemcpyDeviceToHost);
hipFree(d_myVarY);
hipFree(d_myDyy);
hipFree(d_alist);
hipFree(d_blist);
hipFree(d_clist);
}
void updateParams(const unsigned g, const REAL alpha, const REAL beta, const REAL nu, PrivGlobs& globs)
{
for(unsigned i=0;i<globs.numX;++i)
for(unsigned j=0;j<globs.numY;++j) {
globs.myVarX[i * globs.numY + j] =
exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
globs.myVarY[i * globs.numY + j] =
exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
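// The two exponentials above are squared products written through exp/log:
//   myVarX[i][j] = ( myX[i]^beta  * exp(myY[j]) * exp(-0.5*nu*nu*t_g) )^2
//   myVarY[i][j] = ( myX[i]^alpha * exp(myY[j]) * exp(-0.5*nu*nu*t_g) )^2
// since exp(2*(beta*log(x) + y - 0.5*nu*nu*t)) = x^(2*beta) * exp(2*y) * exp(-nu*nu*t).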
void setPayoff(PrivGlobs& globs )
{
//REAL* payoff = (REAL*) malloc(globs.outer * globs.numX*sizeof(REAL));
for(unsigned h=0;h<globs.outer;h++)
for(unsigned i=0;i<globs.numX;++i) {
for(unsigned j=0;j<globs.numY;++j)
globs.myResult[h * globs.numX * globs.numY + i * globs.numY + j] =
max(globs.myX[i]-0.001*h, (REAL)0.0);
}
}
inline void tridag(
REAL* a, // size [n]
REAL* b, // size [n]
REAL* c, // size [n]
REAL* r, // size [n]
const int n,
REAL* u, // size [n]
REAL* uu // size [n] temporary
) {
int i;//, offset;
REAL beta;
u[0] = r[0];
uu[0] = b[0];
for(i=1; i<n; i++) {
beta = a[i] / uu[i-1];
uu[i] = b[i] - beta*c[i-1];
u[i] = r[i] - beta*u[i-1];
}
#if 1
// X) this is a backward recurrence
u[n-1] = u[n-1] / uu[n-1];
for(i=n-2; i>=0; i--) {
u[i] = (u[i] - c[i]*u[i+1]) / uu[i];
}
#else
    // Hint: X) can be written something like the following (once a is made non-constant)
for(i=0; i<n; i++) a[i] = u[n-1-i];
a[0] = a[0] / uu[n-1];
for(i=1; i<n; i++) a[i] = (a[i] - c[n-1-i]*a[i-1]) / uu[n-1-i];
for(i=0; i<n; i++) u[i] = a[n-1-i];
#endif
}
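// tridag is the Thomas algorithm. Small worked example (values assumed): for
//   [2 1 0][x0]   [3]
//   [1 2 1][x1] = [4]
//   [0 1 2][x2]   [3]
// i.e. a = {0,1,1}, b = {2,2,2}, c = {1,1,0}, r = {3,4,3}, the forward sweep
// gives uu = {2, 1.5, 4/3} and intermediate u = {3, 2.5, 4/3}; the backward
// substitution then overwrites u with the solution {1, 1, 1}, which satisfies
// all three equations.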
void
rollback( const unsigned g, const unsigned h, PrivGlobs& globs) {
unsigned numX = globs.numX,
numY = globs.numY;
unsigned numZ = max(numX,numY);
unsigned i, j;
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
/*
vector< vector<REAL> > u(numY, vector<REAL>(numX)); // [numY][numX]
vector< vector<REAL> > v(numX, vector<REAL>(numY)); // [numX][numY]
vector< vector<REAL> > alist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > blist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > clist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > ylist(numZ, vector<REAL>(numZ));
vector<REAL> yy(numZ); // temporary used in tridag // [max(numX,numY)]
*/
REAL* u = (REAL*) malloc(numY*numX*sizeof(REAL));
REAL* v = (REAL*) malloc(numY*numX*sizeof(REAL));
REAL* alist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* blist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* clist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* ylist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* yy = (REAL*) malloc(numZ*sizeof(REAL));
unsigned int expand = numX * numY;
// explicit x
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) {
u[j * numX + i] = dtInv*globs.myResult[h * expand + i * numY + j];
if(i > 0) {
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 0])
* globs.myResult[h * expand + (i-1) * numY + j];
}
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 1])
*globs.myResult[h * expand + i * numY + j];
if(i < numX-1) {
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 2])
* globs.myResult[h * expand + (i+1) * numY + j];
}
}
}
// explicit y
for(j=0;j<numY;j++)
{
for(i=0;i<numX;i++) {
v[ i* numY + j] = 0.0;
if(j > 0) {
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY+ j]
*globs.myDyy[j * 4 + 0] )
*globs.myResult[h * expand + i * numY + (j-1)];
}
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY +j]
*globs.myDyy[j * 4 + 1] )
*globs.myResult[h * expand + i * numY + j];
if(j < numY-1) {
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY +j]
*globs.myDyy[j * 4 + 2] )
*globs.myResult[h * expand + i * numY + (j+1)];
}
u[j * numX + i] += v[ i * numY + j];
}
}
/*
// implicit x
for(j=0;j<numY;j++) {
for(i=0;i<numX;i++) { // here a,b,c should have size [numX]
alist[ j * numX + i] = - 0.5*(0.5*
globs.myVarX[ i * numY + j]*globs.myDxx[i * 4 + 0]);
blist[ j * numX + i] = dtInv - 0.5*(0.5*
globs.myVarX[i * numY + j]*globs.myDxx[i * 4 + 1]);
clist[ j * numX + i] = - 0.5*(0.5*
globs.myVarX[i * numY + j]*globs.myDxx[i * 4 + 2]);
}
//tridagPar(alist[j],blist[j],clist[j],u[j],numX,u[j],yy);
// here yy should have size [numX]
}
*/
GPUimplicitX(globs, alist, blist, clist, g);
for(j=0;j<numY;j++) {
tridag(&alist[ j*numX],&blist[ j*numX],
&clist[ j*numX],&u[ j*numX],
numX,&u[j*numX],yy);
}
/*
// implicit y
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) { // here a, b, c should have size [numY]
alist[ i * numY + j] = - 0.5*(0.5*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 0]);
blist[ i * numY + j] = dtInv - 0.5*(0.5
*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 1]);
clist[ i * numY + j] = - 0.5*(0.5
*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 2]);
}
}
*/
// here yy should have size [numY]
GPUimplicitY(globs, alist, blist, clist, g);
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) {
        ylist[ i * numY + j] = dtInv*u[j * numX + i]
- 0.5*v[ i * numY + j];
}
}
for(i=0;i<numX;i++) {
tridag(&alist[ i*numY],&blist[ i*numY],
&clist[ i*numY],&ylist[ i*numY],
numY,&globs.myResult[h * expand + i*numY],yy);
}
    // Release the scratch buffers allocated above.
    free(u); free(v); free(yy);
    free(alist); free(blist); free(clist); free(ylist);
}
REAL value( PrivGlobs globs,
const REAL s0,
const REAL strike,
const REAL t,
const REAL alpha,
const REAL nu,
const REAL beta,
const unsigned int numX,
const unsigned int numY,
const unsigned int numT
) {
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx, numX);
initOperator(globs.myY,globs.myDyy, numY);
setPayoff(globs);
for(int i = numT-2;i>=0;--i)
{
updateParams(i,alpha,beta,nu,globs);
//rollback(i, 2, globs);
}
return globs.myResult[globs.myXindex * numY + globs.myYindex];
}
void GPUupdateParams(const unsigned g, const REAL alpha, const REAL beta,
const REAL nu, PrivGlobs& globs)
{
unsigned int block_dim = BLOCK_DIM;
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim),1);
REAL *d_myVarX, *d_myVarY, *d_myX, *d_myY, *d_myTimeline;
hipMalloc((void**)&d_myVarX, globs.numX*globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myVarY, globs.numX*globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myX, globs.numX*sizeof(REAL));
hipMalloc((void**)&d_myY, globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myTimeline, globs.numT*sizeof(REAL));
hipMemcpy(d_myVarX, globs.myVarX, globs.numX*globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myVarY, globs.myVarY, globs.numX*globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myX, globs.myX, globs.numX*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myY, globs.myY, globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myTimeline, globs.myTimeline,
globs.numT*sizeof(REAL), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( updateParamsKer), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, g, alpha, beta, nu, globs.numX, globs.numY,
d_myX, d_myY, d_myVarX, d_myVarY, d_myTimeline);
hipDeviceSynchronize();
hipMemcpy(globs.myVarX, d_myVarX, globs.numX*globs.numY*sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(globs.myVarY, d_myVarY, globs.numX*globs.numY*sizeof(REAL), hipMemcpyDeviceToHost);
hipFree(d_myVarX);
hipFree(d_myVarY);
hipFree(d_myX);
hipFree(d_myY);
hipFree(d_myTimeline);
}
void GPUsetParams(PrivGlobs& globs)
{
unsigned int block_dim = BLOCK_DIM;
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim),globs.outer);
REAL *d_myX, *d_myResult;
hipMalloc((void**)&d_myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL));
hipMalloc((void**)&d_myX, globs.numX*sizeof(REAL));
hipMemcpy(d_myResult, globs.myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL), hipMemcpyHostToDevice);
hipMemcpy(d_myX, globs.myX, globs.numX*sizeof(REAL), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( setParamsKer), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, globs.numX, globs.numY, d_myX, d_myResult);
hipDeviceSynchronize();
hipMemcpy(globs.myResult, d_myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL), hipMemcpyDeviceToHost);
hipFree(d_myX);
hipFree(d_myResult);
}
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT, outer);
/*
for(unsigned i = 0; i < outer;++i) {
strike[i] = 0.001*i;
//PrivGlobs globs(numX, numY, numT, outer);
//globslist[i] = globs;
}*/
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx, numX);
initOperator(globs.myY,globs.myDyy, numY);
//setPayoff(globs);
GPUsetParams(globs);
for(int h=0; h<outer; h++) {
for(int i = numT-2;i>=0;--i)
{
GPUupdateParams(i,alpha,beta,nu,globs);
rollback(i, h, globs);
}
}
for(unsigned i = 0; i < outer; ++i){
res[i] = globs.myResult[i * numX * numY + globs.myXindex * numY + globs.myYindex];
}
/*
for( unsigned i = 0; i < outer; ++ i ) {
res[i] = value( globslist[i], s0, strike[i], t,
alpha, nu, beta,
numX, numY, numT );
}*/
}
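// Hypothetical driver for run_OrigCPU (sizes and market parameters below are
// illustrative assumptions only, not taken from this project's datasets):
//
//   const unsigned int outer = 16, numX = 32, numY = 256, numT = 90;
//   std::vector<REAL> res(outer);
//   run_OrigCPU(outer, numX, numY, numT,
//               /*s0=*/0.03, /*t=*/5.0, /*alpha=*/0.2, /*nu=*/0.6, /*beta=*/0.5,
//               res.data());
//   // res[h] then holds the value for strike 0.001*h, matching setPayoff above.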
//#endif // PROJ_CORE_ORIG
| d16e045b056a3c441a2eb6c17d24b284a205531a.cu | #include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "updateParamsKernels.cu.h"
#define BLOCK_DIM 8
void GPUimplicitX(PrivGlobs& globs, REAL* alist, REAL* blist, REAL* clist, const unsigned g){
unsigned int block_dim = BLOCK_DIM;
unsigned int numZ = max(globs.numX, globs.numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim));
REAL* d_myVarX, *d_myDxx, *d_alist, *d_blist, *d_clist;
cudaMalloc((void**)&d_myVarX, globs.numX*globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myDxx, globs.numX*4*sizeof(REAL));
cudaMalloc((void**)&d_alist, numZ*numZ*sizeof(REAL));
cudaMalloc((void**)&d_blist, numZ*numZ*sizeof(REAL));
cudaMalloc((void**)&d_clist, numZ*numZ*sizeof(REAL));
cudaMemcpy(d_myVarX, globs.myVarX, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myDxx, globs.myDxx, globs.numX*4*sizeof(REAL), cudaMemcpyHostToDevice);
implicitX<<<num_blocks, threadsPerBlock>>>(globs.numX, globs.numY, dtInv, d_myVarX, d_myDxx,
d_alist, d_blist, d_clist);
cudaMemcpy(alist, d_alist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(blist, d_blist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(clist, d_clist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaFree(d_myVarX);
cudaFree(d_myDxx);
cudaFree(d_alist);
cudaFree(d_blist);
cudaFree(d_clist);
}
void GPUimplicitY(PrivGlobs& globs, REAL* alist, REAL* blist, REAL* clist, const unsigned g){
unsigned int block_dim = BLOCK_DIM;
unsigned int numZ = max(globs.numX, globs.numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim));
REAL* d_myVarY, *d_myDyy, *d_alist, *d_blist, *d_clist;
cudaMalloc((void**)&d_myVarY, globs.numX*globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myDyy, globs.numY*4*sizeof(REAL));
cudaMalloc((void**)&d_alist, numZ*numZ*sizeof(REAL));
cudaMalloc((void**)&d_blist, numZ*numZ*sizeof(REAL));
cudaMalloc((void**)&d_clist, numZ*numZ*sizeof(REAL));
cudaMemcpy(d_myVarY, globs.myVarY, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myDyy, globs.myDyy, globs.numY*4*sizeof(REAL), cudaMemcpyHostToDevice);
implicitY<<<num_blocks, threadsPerBlock>>>(globs.numX, globs.numY, dtInv, d_myVarY, d_myDyy,
d_alist, d_blist, d_clist);
cudaMemcpy(alist, d_alist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(blist, d_blist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(clist, d_clist, numZ*numZ*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaFree(d_myVarY);
cudaFree(d_myDyy);
cudaFree(d_alist);
cudaFree(d_blist);
cudaFree(d_clist);
}
void updateParams(const unsigned g, const REAL alpha, const REAL beta, const REAL nu, PrivGlobs& globs)
{
for(unsigned i=0;i<globs.numX;++i)
for(unsigned j=0;j<globs.numY;++j) {
globs.myVarX[i * globs.numY + j] =
exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
globs.myVarY[i * globs.numY + j] =
exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
void setPayoff(PrivGlobs& globs )
{
//REAL* payoff = (REAL*) malloc(globs.outer * globs.numX*sizeof(REAL));
for(unsigned h=0;h<globs.outer;h++)
for(unsigned i=0;i<globs.numX;++i) {
for(unsigned j=0;j<globs.numY;++j)
globs.myResult[h * globs.numX * globs.numY + i * globs.numY + j] =
max(globs.myX[i]-0.001*h, (REAL)0.0);
}
}
inline void tridag(
REAL* a, // size [n]
REAL* b, // size [n]
REAL* c, // size [n]
REAL* r, // size [n]
const int n,
REAL* u, // size [n]
REAL* uu // size [n] temporary
) {
int i;//, offset;
REAL beta;
u[0] = r[0];
uu[0] = b[0];
for(i=1; i<n; i++) {
beta = a[i] / uu[i-1];
uu[i] = b[i] - beta*c[i-1];
u[i] = r[i] - beta*u[i-1];
}
#if 1
// X) this is a backward recurrence
u[n-1] = u[n-1] / uu[n-1];
for(i=n-2; i>=0; i--) {
u[i] = (u[i] - c[i]*u[i+1]) / uu[i];
}
#else
    // Hint: X) can be written something like the following (once a is made non-constant)
for(i=0; i<n; i++) a[i] = u[n-1-i];
a[0] = a[0] / uu[n-1];
for(i=1; i<n; i++) a[i] = (a[i] - c[n-1-i]*a[i-1]) / uu[n-1-i];
for(i=0; i<n; i++) u[i] = a[n-1-i];
#endif
}
void
rollback( const unsigned g, const unsigned h, PrivGlobs& globs) {
unsigned numX = globs.numX,
numY = globs.numY;
unsigned numZ = max(numX,numY);
unsigned i, j;
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
/*
vector< vector<REAL> > u(numY, vector<REAL>(numX)); // [numY][numX]
vector< vector<REAL> > v(numX, vector<REAL>(numY)); // [numX][numY]
vector< vector<REAL> > alist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > blist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > clist(numZ, vector<REAL>(numZ));
vector< vector<REAL> > ylist(numZ, vector<REAL>(numZ));
vector<REAL> yy(numZ); // temporary used in tridag // [max(numX,numY)]
*/
REAL* u = (REAL*) malloc(numY*numX*sizeof(REAL));
REAL* v = (REAL*) malloc(numY*numX*sizeof(REAL));
REAL* alist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* blist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* clist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* ylist = (REAL*) malloc(numZ*numZ*sizeof(REAL));
REAL* yy = (REAL*) malloc(numZ*sizeof(REAL));
unsigned int expand = numX * numY;
// explicit x
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) {
u[j * numX + i] = dtInv*globs.myResult[h * expand + i * numY + j];
if(i > 0) {
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 0])
* globs.myResult[h * expand + (i-1) * numY + j];
}
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 1])
*globs.myResult[h * expand + i * numY + j];
if(i < numX-1) {
u[j * numX + i] += 0.5*( 0.5*globs.myVarX[i * numY + j]
*globs.myDxx[i * 4 + 2])
* globs.myResult[h * expand + (i+1) * numY + j];
}
}
}
// explicit y
for(j=0;j<numY;j++)
{
for(i=0;i<numX;i++) {
v[ i* numY + j] = 0.0;
if(j > 0) {
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY+ j]
*globs.myDyy[j * 4 + 0] )
*globs.myResult[h * expand + i * numY + (j-1)];
}
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY +j]
*globs.myDyy[j * 4 + 1] )
*globs.myResult[h * expand + i * numY + j];
if(j < numY-1) {
v[ i* numY + j] += ( 0.5*globs.myVarY[i * numY +j]
*globs.myDyy[j * 4 + 2] )
*globs.myResult[h * expand + i * numY + (j+1)];
}
u[j * numX + i] += v[ i * numY + j];
}
}
/*
// implicit x
for(j=0;j<numY;j++) {
for(i=0;i<numX;i++) { // here a,b,c should have size [numX]
alist[ j * numX + i] = - 0.5*(0.5*
globs.myVarX[ i * numY + j]*globs.myDxx[i * 4 + 0]);
blist[ j * numX + i] = dtInv - 0.5*(0.5*
globs.myVarX[i * numY + j]*globs.myDxx[i * 4 + 1]);
clist[ j * numX + i] = - 0.5*(0.5*
globs.myVarX[i * numY + j]*globs.myDxx[i * 4 + 2]);
}
//tridagPar(alist[j],blist[j],clist[j],u[j],numX,u[j],yy);
// here yy should have size [numX]
}
*/
GPUimplicitX(globs, alist, blist, clist, g);
for(j=0;j<numY;j++) {
tridag(&alist[ j*numX],&blist[ j*numX],
&clist[ j*numX],&u[ j*numX],
numX,&u[j*numX],yy);
}
/*
// implicit y
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) { // here a, b, c should have size [numY]
alist[ i * numY + j] = - 0.5*(0.5*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 0]);
blist[ i * numY + j] = dtInv - 0.5*(0.5
*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 1]);
clist[ i * numY + j] = - 0.5*(0.5
*globs.myVarY[i * numY + j]
*globs.myDyy[j * 4 + 2]);
}
}
*/
// here yy should have size [numY]
GPUimplicitY(globs, alist, blist, clist, g);
for(i=0;i<numX;i++) {
for(j=0;j<numY;j++) {
ylist[ i * numY + j] = dtInv*u[ j * numX + i]
- 0.5*v[ i * numY + j];
}
}
for(i=0;i<numX;i++) {
tridag(&alist[ i*numY],&blist[ i*numY],
&clist[ i*numY],&ylist[ i*numY],
numY,&globs.myResult[h * expand + i*numY],yy);
}
    // Release the per-call scratch buffers (rollback is called for every
    // time step of every outer iteration, so leaking them adds up).
    free(u); free(v);
    free(alist); free(blist); free(clist); free(ylist);
    free(yy);
}
REAL value( PrivGlobs globs,
const REAL s0,
const REAL strike,
const REAL t,
const REAL alpha,
const REAL nu,
const REAL beta,
const unsigned int numX,
const unsigned int numY,
const unsigned int numT
) {
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx, numX);
initOperator(globs.myY,globs.myDyy, numY);
setPayoff(globs);
for(int i = numT-2;i>=0;--i)
{
updateParams(i,alpha,beta,nu,globs);
//rollback(i, 2, globs);
}
return globs.myResult[globs.myXindex * numY + globs.myYindex];
}
void GPUupdateParams(const unsigned g, const REAL alpha, const REAL beta,
const REAL nu, PrivGlobs& globs)
{
unsigned int block_dim = BLOCK_DIM;
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim),1);
REAL *d_myVarX, *d_myVarY, *d_myX, *d_myY, *d_myTimeline;
cudaMalloc((void**)&d_myVarX, globs.numX*globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myVarY, globs.numX*globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myX, globs.numX*sizeof(REAL));
cudaMalloc((void**)&d_myY, globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myTimeline, globs.numT*sizeof(REAL));
cudaMemcpy(d_myVarX, globs.myVarX, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myVarY, globs.myVarY, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myX, globs.myX, globs.numX*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myY, globs.myY, globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myTimeline, globs.myTimeline,
globs.numT*sizeof(REAL), cudaMemcpyHostToDevice);
updateParamsKer<<<num_blocks, threadsPerBlock>>>(g, alpha, beta, nu, globs.numX, globs.numY,
d_myX, d_myY, d_myVarX, d_myVarY, d_myTimeline);
cudaDeviceSynchronize();
cudaMemcpy(globs.myVarX, d_myVarX, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(globs.myVarY, d_myVarY, globs.numX*globs.numY*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaFree(d_myVarX);
cudaFree(d_myVarY);
cudaFree(d_myX);
cudaFree(d_myY);
cudaFree(d_myTimeline);
}
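// Note: the cudaMalloc/cudaMemcpy calls in these GPU helpers are not
// error-checked. A minimal sketch of a wrapper that could be applied to
// them -- CHECK_CUDA is an illustrative name, not used elsewhere in this
// file, and expanding it requires <cstdio> and <cstdlib>:
#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(1);                                                        \
        }                                                                   \
    } while (0)
// Example: CHECK_CUDA(cudaMemcpy(d_myX, globs.myX, globs.numX*sizeof(REAL),
//                                cudaMemcpyHostToDevice));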
void GPUsetParams(PrivGlobs& globs)
{
unsigned int block_dim = BLOCK_DIM;
dim3 threadsPerBlock(block_dim, block_dim, 1);
dim3 num_blocks(ceil((float)globs.numX/block_dim), ceil((float)globs.numY/block_dim),globs.outer);
REAL *d_myX, *d_myResult;
cudaMalloc((void**)&d_myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL));
cudaMalloc((void**)&d_myX, globs.numX*sizeof(REAL));
cudaMemcpy(d_myResult, globs.myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL), cudaMemcpyHostToDevice);
cudaMemcpy(d_myX, globs.myX, globs.numX*sizeof(REAL), cudaMemcpyHostToDevice);
setParamsKer<<<num_blocks, threadsPerBlock>>>(globs.numX, globs.numY, d_myX, d_myResult);
cudaDeviceSynchronize();
cudaMemcpy(globs.myResult, d_myResult, globs.outer*globs.numX*globs.numY*sizeof(REAL), cudaMemcpyDeviceToHost);
cudaFree(d_myX);
cudaFree(d_myResult);
}
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT, outer);
/*
for(unsigned i = 0; i < outer;++i) {
strike[i] = 0.001*i;
//PrivGlobs globs(numX, numY, numT, outer);
//globslist[i] = globs;
}*/
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx, numX);
initOperator(globs.myY,globs.myDyy, numY);
//setPayoff(globs);
GPUsetParams(globs);
for(int h=0; h<outer; h++) {
for(int i = numT-2;i>=0;--i)
{
GPUupdateParams(i,alpha,beta,nu,globs);
rollback(i, h, globs);
}
}
for(unsigned i = 0; i < outer; ++i){
res[i] = globs.myResult[i * numX * numY + globs.myXindex * numY + globs.myYindex];
}
/*
for( unsigned i = 0; i < outer; ++ i ) {
res[i] = value( globslist[i], s0, strike[i], t,
alpha, nu, beta,
numX, numY, numT );
}*/
}
//#endif // PROJ_CORE_ORIG
|
8c3ba6b1290e6352e8dc8548bc041b2dc0f34b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scalar.h>
// d1 is the scalar, d2 is the current element: returns 1.0 when d2 < d1, 0.0 otherwise
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {
return 1;
}
return 0;
}
extern "C"
__global__ void lessthan_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
| 8c3ba6b1290e6352e8dc8548bc041b2dc0f34b55.cu | #include <scalar.h>
// d1 is the scalar, d2 is the current element: returns 1.0 when d2 < d1, 0.0 otherwise
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {
return 1;
}
return 0;
}
extern "C"
__global__ void lessthan_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
|
39e992469eee08eb2030f06c68d08369230ce508.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/masked_select_kernel.h"
namespace phi {
__global__ void SetMaskArray(const bool* mask, int32_t* mask_array, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx])
mask_array[idx] = 1;
else
mask_array[idx] = 0;
}
}
template <typename T>
__global__ void SelectWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask,
const T* input,
T* out,
int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[index] = input[idx];
}
}
}
template <typename T, typename Context>
void MaskedSelectKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& mask,
DenseTensor* out) {
auto* mask_data = mask.data<bool>();
auto input_data = x.data<T>();
auto mask_size = mask.numel();
auto input_dim = x.dims();
auto mask_dim = mask.dims();
PADDLE_ENFORCE_EQ(input_dim,
mask_dim,
phi::errors::InvalidArgument(
"The dim size of input and mask in OP(masked_selected) "
"must be equal, but got input dim:(%ld), mask dim: "
"(%ld). Please check input "
"value.",
input_dim,
mask_dim));
thrust::device_ptr<const bool> mask_dev_ptr =
thrust::device_pointer_cast(mask_data);
thrust::device_vector<T> mask_vec(mask_dev_ptr, mask_dev_ptr + mask_size);
auto out_size = thrust::count(mask_vec.begin(), mask_vec.end(), true);
DDim out_dim{out_size};
out->Resize(out_dim);
auto out_data = out->mutable_data<T>(dev_ctx.GetPlace());
DenseTensor mask_array;
DenseTensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data =
mask_array.mutable_data<int32_t>(dev_ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(dev_ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = dev_ctx.stream();
hipLaunchKernelGGL(( SetMaskArray), dim3(grid), dim3(threads), 0, stream,
mask_data, mask_array_data, mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(mask_array_dev_ptr,
mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device,
mask_array_vec.begin(),
mask_array_vec.end(),
mask_prefix_sum_data);
hipLaunchKernelGGL(( SelectWithPrefixMask<T>), dim3(grid), dim3(threads), 0, stream,
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
} // namespace phi
PD_REGISTER_KERNEL(masked_select,
GPU,
ALL_LAYOUT,
phi::MaskedSelectKernel,
float,
double,
int,
int64_t) {
kernel->InputAt(1).SetDataType(phi::DataType::BOOL);
}
| 39e992469eee08eb2030f06c68d08369230ce508.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/masked_select_kernel.h"
namespace phi {
__global__ void SetMaskArray(const bool* mask, int32_t* mask_array, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx])
mask_array[idx] = 1;
else
mask_array[idx] = 0;
}
}
template <typename T>
__global__ void SelectWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask,
const T* input,
T* out,
int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[index] = input[idx];
}
}
}
template <typename T, typename Context>
void MaskedSelectKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& mask,
DenseTensor* out) {
auto* mask_data = mask.data<bool>();
auto input_data = x.data<T>();
auto mask_size = mask.numel();
auto input_dim = x.dims();
auto mask_dim = mask.dims();
PADDLE_ENFORCE_EQ(input_dim,
mask_dim,
phi::errors::InvalidArgument(
"The dim size of input and mask in OP(masked_selected) "
"must be equal, but got input dim:(%ld), mask dim: "
"(%ld). Please check input "
"value.",
input_dim,
mask_dim));
thrust::device_ptr<const bool> mask_dev_ptr =
thrust::device_pointer_cast(mask_data);
thrust::device_vector<T> mask_vec(mask_dev_ptr, mask_dev_ptr + mask_size);
auto out_size = thrust::count(mask_vec.begin(), mask_vec.end(), true);
DDim out_dim{out_size};
out->Resize(out_dim);
auto out_data = out->mutable_data<T>(dev_ctx.GetPlace());
DenseTensor mask_array;
DenseTensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data =
mask_array.mutable_data<int32_t>(dev_ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(dev_ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = dev_ctx.stream();
SetMaskArray<<<grid, threads, 0, stream>>>(
mask_data, mask_array_data, mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(mask_array_dev_ptr,
mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device,
mask_array_vec.begin(),
mask_array_vec.end(),
mask_prefix_sum_data);
SelectWithPrefixMask<T><<<grid, threads, 0, stream>>>(
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
} // namespace phi
PD_REGISTER_KERNEL(masked_select,
GPU,
ALL_LAYOUT,
phi::MaskedSelectKernel,
float,
double,
int,
int64_t) {
kernel->InputAt(1).SetDataType(phi::DataType::BOOL);
}
|
2fcd3290c8c842f22f68de4402dc6307221eb434.hip | // !!! This is a file automatically generated by hipify!!!
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "c:\\GnuWin32\\bin\\alenka\\bison.y"
#include "lex.yy.c"
#include "cm.h"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_distinct();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1, int grp);
void emit_join_tab(char *s, bool left);
void emit_distinct();
/* Line 189 of yacc.c */
#line 124 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
DISTINCT = 271,
REGEXP = 272,
LIKE = 273,
IS = 274,
IN = 275,
NOT = 276,
BETWEEN = 277,
COMPARISON = 278,
SHIFT = 279,
MOD = 280,
UMINUS = 281,
LOAD = 282,
STREAM = 283,
FILTER = 284,
BY = 285,
JOIN = 286,
STORE = 287,
INTO = 288,
GROUP = 289,
FROM = 290,
SELECT = 291,
AS = 292,
ORDER = 293,
ASC = 294,
DESC = 295,
COUNT = 296,
USING = 297,
SUM = 298,
AVG = 299,
MIN = 300,
MAX = 301,
LIMIT = 302,
ON = 303,
BINARY = 304,
LEFT = 305
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "c:\\GnuWin32\\bin\\alenka\\bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 219 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 231 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large to enough to hold all stacks, each with
N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 457
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 68
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 64
/* YYNRULES -- Number of states. */
#define YYNSTATES 161
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 305
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 21, 2, 2, 2, 32, 26, 2,
61, 62, 30, 28, 67, 29, 63, 31, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 66, 60,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 34, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 64, 25, 65, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 22, 23, 24, 27,
33, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59
};
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 65, 75, 82, 84, 88, 90, 92, 94, 96,
98, 100, 110, 117, 120, 123, 128, 133, 138, 143,
148, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 195, 198, 201, 205, 211, 215, 219, 224,
225, 229, 233, 239, 241, 245, 247, 251, 252, 254,
257, 262, 268, 274, 275
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
69, 0, -1, 70, 60, -1, 69, 70, 60, -1,
71, -1, 4, 11, 45, 74, 44, 4, 73, -1,
4, 11, 36, 3, 51, 61, 3, 62, 46, 61,
75, 62, -1, 4, 11, 36, 3, 58, 46, 61,
75, 62, -1, 4, 11, 38, 4, 78, -1, 4,
11, 47, 4, 39, 77, -1, 4, 11, 45, 74,
44, 4, 79, 73, -1, 41, 4, 42, 3, 51,
61, 3, 62, 80, -1, 41, 4, 42, 3, 80,
58, -1, 4, -1, 4, 63, 4, -1, 10, -1,
5, -1, 6, -1, 9, -1, 7, -1, 8, -1,
4, 64, 6, 65, 66, 4, 61, 6, 62, -1,
4, 64, 6, 65, 66, 4, -1, 4, 48, -1,
4, 49, -1, 50, 61, 72, 62, -1, 52, 61,
72, 62, -1, 53, 61, 72, 62, -1, 54, 61,
72, 62, -1, 55, 61, 72, 62, -1, 16, 72,
-1, 72, 28, 72, -1, 72, 29, 72, -1, 72,
30, 72, -1, 72, 31, 72, -1, 72, 32, 72,
-1, 72, 33, 72, -1, 72, 15, 72, -1, 72,
12, 72, -1, 72, 13, 72, -1, 72, 14, 72,
-1, 72, 27, 72, -1, 22, 72, -1, 21, 72,
-1, 72, 24, 72, -1, 72, 24, 61, 71, 62,
-1, 61, 72, 62, -1, 72, 19, 8, -1, 72,
19, 22, 8, -1, -1, 43, 39, 76, -1, 72,
46, 4, -1, 74, 67, 72, 46, 4, -1, 72,
-1, 75, 67, 72, -1, 72, -1, 72, 67, 76,
-1, -1, 76, -1, 39, 72, -1, 40, 4, 57,
72, -1, 59, 40, 4, 57, 72, -1, 40, 4,
57, 72, 79, -1, -1, 56, 6, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 140, 140, 141, 145, 148, 150, 152, 154, 156,
158, 160, 162, 167, 168, 169, 170, 171, 172, 173,
174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
184, 188, 189, 190, 191, 192, 193, 195, 196, 197,
198, 199, 200, 201, 202, 204, 205, 209, 210, 213,
216, 220, 221, 225, 226, 230, 231, 234, 236, 239,
242, 243, 244, 246, 249
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "DISTINCT", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT",
"BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'",
"'/'", "'%'", "MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY",
"JOIN", "STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC",
"DESC", "COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON",
"BINARY", "LEFT", "';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','",
"$accept", "stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 33, 276, 277, 278, 124, 38, 279, 43, 45,
42, 47, 37, 280, 94, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
59, 40, 41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 68, 69, 69, 70, 71, 71, 71, 71, 71,
71, 71, 71, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 73,
73, 74, 74, 75, 75, 76, 76, 77, 77, 78,
79, 79, 79, 80, 80
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
8, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 2, 2, 3, 5, 3, 3, 4, 0,
3, 3, 5, 1, 3, 1, 3, 0, 1, 2,
4, 5, 5, 0, 2
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
STATE-NUM when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 63, 0,
0, 0, 8, 23, 24, 0, 0, 30, 43, 42,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 57, 0, 0, 0, 0, 0, 59, 14,
0, 0, 0, 0, 0, 0, 46, 38, 39, 40,
37, 47, 0, 0, 44, 41, 31, 32, 33, 34,
35, 36, 51, 49, 0, 55, 58, 9, 0, 64,
12, 0, 0, 0, 25, 26, 27, 28, 29, 48,
13, 0, 0, 0, 0, 5, 49, 0, 0, 0,
0, 53, 0, 0, 45, 0, 0, 0, 10, 52,
56, 63, 0, 7, 0, 22, 0, 50, 0, 11,
0, 54, 0, 60, 0, 0, 0, 62, 61, 6,
21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 105, 125, 36, 132, 106, 107,
42, 126, 75
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -126
static const yytype_int16 yypact[] =
{
6, -3, 5, 1, -44, -126, 59, -18, -126, -34,
-126, 53, 64, 113, 65, 54, -126, -39, 31, 37,
-126, -126, -126, -126, -126, -126, 113, 113, 113, 33,
41, 67, 71, 75, 113, 337, -42, 76, 27, 77,
78, 113, -126, -126, -126, 122, 133, 401, 411, 411,
113, 113, 113, 113, 113, 182, 113, 113, 113, 113,
-2, 138, 113, 113, 113, 113, 113, 113, 113, 136,
145, 113, 113, 89, 146, 93, 150, 94, 386, -126,
91, 204, 226, 248, 270, 292, -126, 386, 3, 154,
401, -126, 149, 55, 418, 424, 81, 81, -126, -126,
-126, -126, -126, -36, 363, 60, -126, -126, 155, -126,
-126, 99, 113, 96, -126, -126, -126, -126, -126, -126,
18, 102, 166, 132, 135, -126, 129, 172, 113, 115,
134, 386, 36, 175, -126, 141, 113, 185, -126, -126,
-126, 144, 142, -126, 113, 143, 113, -126, 148, -126,
113, 386, 196, 315, 113, 63, 158, -126, 386, -126,
-126
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-126, -126, 205, 114, -13, 95, -126, 72, -125, -126,
-126, 73, 83
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
35, 8, 70, 140, 122, 1, 91, 123, 6, 7,
1, 147, 39, 47, 48, 49, 10, 58, 59, 40,
92, 55, 60, 124, 15, 71, 16, 61, 78, 6,
62, 63, 64, 65, 66, 67, 68, 81, 82, 83,
84, 85, 2, 87, 88, 89, 90, 2, 94, 95,
96, 97, 98, 99, 100, 101, 17, 38, 104, 120,
20, 21, 22, 23, 24, 25, 43, 44, 18, 37,
41, 26, 56, 57, 58, 59, 27, 28, 73, 60,
55, 45, 46, 74, 61, 43, 44, 62, 63, 64,
65, 66, 67, 68, 50, 11, 2, 12, 143, 131,
45, 46, 51, 144, 13, 29, 14, 30, 31, 32,
33, 65, 66, 67, 68, 72, 34, 19, 20, 21,
22, 23, 24, 25, 77, 159, 79, 128, 52, 26,
144, 151, 53, 153, 27, 28, 54, 131, 76, 80,
102, 158, 19, 20, 21, 22, 23, 24, 25, 103,
108, 110, 109, 111, 26, 112, 113, 119, 129, 27,
28, 130, 133, 29, 134, 30, 31, 32, 33, 59,
135, 136, 123, 60, 34, 137, 139, 141, 61, 145,
142, 62, 63, 64, 65, 66, 67, 68, 29, 148,
30, 31, 32, 33, 56, 57, 58, 59, 146, 93,
74, 60, 156, 150, 152, 154, 61, 121, 9, 62,
63, 64, 65, 66, 67, 68, 56, 57, 58, 59,
160, 138, 155, 60, 149, 0, 157, 0, 61, 0,
0, 62, 63, 64, 65, 66, 67, 68, 56, 57,
58, 59, 0, 0, 86, 60, 0, 0, 0, 0,
61, 0, 0, 62, 63, 64, 65, 66, 67, 68,
56, 57, 58, 59, 0, 0, 114, 60, 0, 0,
0, 0, 61, 0, 0, 62, 63, 64, 65, 66,
67, 68, 56, 57, 58, 59, 0, 0, 115, 60,
0, 0, 0, 0, 61, 0, 0, 62, 63, 64,
65, 66, 67, 68, 56, 57, 58, 59, 0, 0,
116, 60, 0, 0, 0, 0, 61, 0, 0, 62,
63, 64, 65, 66, 67, 68, 0, 56, 57, 58,
59, 0, 117, 0, 60, 0, 0, 0, 0, 61,
0, 0, 62, 63, 64, 65, 66, 67, 68, 56,
57, 58, 59, 0, 118, 122, 60, 0, 0, 0,
0, 61, 0, 0, 62, 63, 64, 65, 66, 67,
68, 0, 0, 0, 124, 56, 57, 58, 59, 0,
0, 0, 60, 69, 0, 0, 0, 61, 0, 0,
62, 63, 64, 65, 66, 67, 68, 0, 56, 57,
58, 59, 0, 0, 0, 60, 0, 0, 0, 127,
61, 0, 0, 62, 63, 64, 65, 66, 67, 68,
60, 0, 0, 0, 0, 61, 0, 0, 62, 63,
64, 65, 66, 67, 68, 61, 0, 0, 62, 63,
64, 65, 66, 67, 68, 62, 63, 64, 65, 66,
67, 68, 63, 64, 65, 66, 67, 68
};
static const yytype_int16 yycheck[] =
{
13, 0, 44, 128, 40, 4, 8, 43, 11, 4,
4, 136, 51, 26, 27, 28, 60, 14, 15, 58,
22, 34, 19, 59, 42, 67, 60, 24, 41, 11,
27, 28, 29, 30, 31, 32, 33, 50, 51, 52,
53, 54, 41, 56, 57, 58, 59, 41, 61, 62,
63, 64, 65, 66, 67, 68, 3, 3, 71, 4,
5, 6, 7, 8, 9, 10, 48, 49, 4, 4,
39, 16, 12, 13, 14, 15, 21, 22, 51, 19,
93, 63, 64, 56, 24, 48, 49, 27, 28, 29,
30, 31, 32, 33, 61, 36, 41, 38, 62, 112,
63, 64, 61, 67, 45, 50, 47, 52, 53, 54,
55, 30, 31, 32, 33, 39, 61, 4, 5, 6,
7, 8, 9, 10, 46, 62, 4, 67, 61, 16,
67, 144, 61, 146, 21, 22, 61, 150, 61, 6,
4, 154, 4, 5, 6, 7, 8, 9, 10, 4,
61, 58, 6, 3, 16, 61, 65, 8, 3, 21,
22, 62, 66, 50, 62, 52, 53, 54, 55, 15,
4, 39, 43, 19, 61, 40, 4, 62, 24, 4,
46, 27, 28, 29, 30, 31, 32, 33, 50, 4,
52, 53, 54, 55, 12, 13, 14, 15, 57, 61,
56, 19, 6, 61, 61, 57, 24, 93, 3, 27,
28, 29, 30, 31, 32, 33, 12, 13, 14, 15,
62, 126, 150, 19, 141, -1, 153, -1, 24, -1,
-1, 27, 28, 29, 30, 31, 32, 33, 12, 13,
14, 15, -1, -1, 62, 19, -1, -1, -1, -1,
24, -1, -1, 27, 28, 29, 30, 31, 32, 33,
12, 13, 14, 15, -1, -1, 62, 19, -1, -1,
-1, -1, 24, -1, -1, 27, 28, 29, 30, 31,
32, 33, 12, 13, 14, 15, -1, -1, 62, 19,
-1, -1, -1, -1, 24, -1, -1, 27, 28, 29,
30, 31, 32, 33, 12, 13, 14, 15, -1, -1,
62, 19, -1, -1, -1, -1, 24, -1, -1, 27,
28, 29, 30, 31, 32, 33, -1, 12, 13, 14,
15, -1, 62, -1, 19, -1, -1, -1, -1, 24,
-1, -1, 27, 28, 29, 30, 31, 32, 33, 12,
13, 14, 15, -1, 62, 40, 19, -1, -1, -1,
-1, 24, -1, -1, 27, 28, 29, 30, 31, 32,
33, -1, -1, -1, 59, 12, 13, 14, 15, -1,
-1, -1, 19, 46, -1, -1, -1, 24, -1, -1,
27, 28, 29, 30, 31, 32, 33, -1, 12, 13,
14, 15, -1, -1, -1, 19, -1, -1, -1, 46,
24, -1, -1, 27, 28, 29, 30, 31, 32, 33,
19, -1, -1, -1, -1, 24, -1, -1, 27, 28,
29, 30, 31, 32, 33, 24, -1, -1, 27, 28,
29, 30, 31, 32, 33, 27, 28, 29, 30, 31,
32, 33, 28, 29, 30, 31, 32, 33
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 41, 69, 70, 71, 11, 4, 0, 70,
60, 36, 38, 45, 47, 42, 60, 3, 4, 4,
5, 6, 7, 8, 9, 10, 16, 21, 22, 50,
52, 53, 54, 55, 61, 72, 74, 4, 3, 51,
58, 39, 78, 48, 49, 63, 64, 72, 72, 72,
61, 61, 61, 61, 61, 72, 12, 13, 14, 15,
19, 24, 27, 28, 29, 30, 31, 32, 33, 46,
44, 67, 39, 51, 56, 80, 61, 46, 72, 4,
6, 72, 72, 72, 72, 72, 62, 72, 72, 72,
72, 8, 22, 61, 72, 72, 72, 72, 72, 72,
72, 72, 4, 4, 72, 72, 76, 77, 61, 6,
58, 3, 61, 65, 62, 62, 62, 62, 62, 8,
4, 71, 40, 43, 59, 73, 79, 46, 67, 3,
62, 72, 75, 66, 62, 4, 39, 40, 73, 4,
76, 62, 46, 62, 67, 4, 57, 76, 4, 80,
61, 72, 61, 72, 57, 75, 6, 79, 72, 62,
62
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
/* YY_LOCATION_PRINT -- Print the location on the stream.
This macro was not mandated originally: define only if we know
we won't break user code: when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
/* Line 1455 of yacc.c */
#line 145 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 149 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 151 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 153 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 155 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 157 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 159 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_join((yyvsp[(1) - (8)].strval),(yyvsp[(6) - (8)].strval),(yyvsp[(7) - (8)].intval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 161 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 163 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 167 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 168 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 169 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 170 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 171 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 172 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 173 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 174 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 175 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 176 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 177 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 178 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 179 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 180 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 181 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 182 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 183 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_distinct(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 188 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_add(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 189 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_minus(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 190 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_mul(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 191 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_div(); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 192 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 193 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("MOD"); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 195 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_and(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 196 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_eq(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 197 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_or(); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 198 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("XOR"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 199 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 200 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 201 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("NOT"); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 202 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 204 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{emit("EXPR");;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 209 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 210 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 213 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 51:
/* Line 1455 of yacc.c */
#line 220 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 225 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 230 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 231 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 57:
/* Line 1455 of yacc.c */
#line 234 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 239 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("FILTER BY"); ;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 242 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval), 0);;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 243 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(3) - (5)].strval), 1);;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval), 0); ;}
break;
case 63:
/* Line 1455 of yacc.c */
#line 246 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 64:
/* Line 1455 of yacc.c */
#line 249 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2045 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 252 "c:\\GnuWin32\\bin\\alenka\\bison.y"
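/* --------------------------------------------------------------------------
Epilogue (third section of bison.y): Alenka's execution engine. The emit_*
callbacks below are invoked from the grammar actions above; most of them only
queue operator names and operands, while emit_select / emit_join / emit_order /
emit_filter / emit_store* actually run the queued work on the GPU with Thrust
and CUDPP.
-------------------------------------------------------------------------- */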
#include "filter.h"
#include "select.h"
#include "merge.h"
#include "zone_map.h"
#include "atof.h"
#include "sorts.hip"
#include <limits>
#include "cudpp_src_2.0/include/cudpp_hash.h"
size_t int_size = sizeof(int_type);
size_t float_size = sizeof(float_type);
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
unsigned int distinct_cnt = 0;
int join_col_cnt = 0;
stack<string> op_join;
bool left_join;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
unsigned int int_col_count;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
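/* Parser-to-engine state: the op_type / op_value / op_nums queues (presumably
declared in cm.h, included in the prologue) carry the postfix expression built
by the grammar actions; namevars/typevars/sizevars/cols describe the column
layout of the current LOAD statement; `stat` maps each variable name to the
number of the last statement that references it so intermediate sets can be
freed as early as possible. */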
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the surrounding quotes from the string literal
string sss(str, 1, strlen(str)-2);
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
join_col_cnt++;
}
void emit_eq()
{
op_type.push("JOIN");
}
void emit_distinct()
{
op_type.push("DISTINCT");
distinct_cnt++;
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
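/* intentionally empty: emit() is a textual trace hook for the grammar
actions; the formatted message is simply discarded */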
}
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s, bool left)
{
op_join.push(s);
left_join = left;
};
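/* order_inplace: GPU in-place sort of CudaSet `a`. Builds an identity
permutation, refines it with update_permutation() for every sort key on the
exe_type stack, then applies the final permutation to each column in
field_names. `temp` is scratch space sized for the widest participating
column. */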
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
//std::clock_t start1 = std::clock();
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest mRecSize of all data sources exe_type.top()
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
unsigned int max_c = max_char(a, field_names);
//cout << "max_c " << max_c << endl;
if(max_c > float_size)
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*max_c));
else
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
unsigned int str_count = 0;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if (a->type[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if (a->type[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
// use int col int_col_count
update_permutation(a->d_columns_int[int_col_count+str_count], raw_ptr, sz, "ASC", (int_type*)temp);
str_count++;
};
};
str_count = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if (a->type[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if (a->type[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
apply_permutation_char(a->d_columns_char[a->type_index[i]], raw_ptr, sz, (char*)temp, a->char_size[a->type_index[i]]);
apply_permutation(a->d_columns_int[int_col_count + str_count], raw_ptr, sz, (int_type*)temp);
str_count++;
};
};
hipFree(temp);
thrust::device_free(permutation);
}
int hh = 0;
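/* emit_join: hash join of two CudaSets. The right-hand join column is sorted
if necessary, loaded into a CUDPP multivalue hash table, and then probed one
left-hand segment at a time; the matching row indices (d_res1/d_res2) are used
to gather the selected columns into the result set on the host. String keys
are joined through per-column hashes added as extra int columns. */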
void emit_join(char *s, char *j1, int grp)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;
//cout << "join col count " << join_col_cnt << endl;
queue<string> op_v1(op_value);
while(op_v1.size() ) {
op_v1.pop();
grp++;
};
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right,left,0,op_sel, op_sel_as);
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1, colInd2;
string tmpstr;
if (left->columnNames.find(f1) != left->columnNames.end()) {
colInd1 = (left->columnNames).find(f1)->second;
if (right->columnNames.find(f2) != right->columnNames.end()) {
colInd2 = (right->columnNames).find(f2)->second;
}
else {
cout << "Couldn't find column " << f2 << endl;
exit(0);
};
}
else if (right->columnNames.find(f1) != right->columnNames.end()) {
colInd2 = (right->columnNames).find(f1)->second;
tmpstr = f1;
f1 = f2;
if (left->columnNames.find(f2) != left->columnNames.end()) {
colInd1 = (left->columnNames).find(f2)->second;
f2 = tmpstr;
}
else {
cout << "Couldn't find column " << f2 << endl;
exit(0);
};
}
else {
cout << "Couldn't find column " << f1 << endl;
exit(0);
};
if (!((left->type[colInd1] == 0 && right->type[colInd2] == 0) || (left->type[colInd1] == 2 && right->type[colInd2] == 2)
|| (left->type[colInd1] == 1 && right->type[colInd2] == 1 && left->decimal[colInd1] && right->decimal[colInd2]))) {
cout << "Joins on floats are not supported " << endl;
exit(0);
};
bool decimal_join = 0;
if (left->type[colInd1] == 1 && right->type[colInd2] == 1)
decimal_join = 1;
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
bool str_join = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = 0; i < right->segCount; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
};
// need to allocate all right columns
queue<string> cc;
unsigned int rcount;
curr_segment = 10000000;
queue<string> op_vd(op_value);
queue<string> op_alt(op_sel);
unsigned int jc = join_col_cnt;
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
unsigned int cnt_r = load_queue(op_alt, right, str_join, f2, rcount);
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
//here we need to make sure that right column is ordered. If not then we order it and keep the permutation
bool sorted;
if(!decimal_join)
sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
else
sorted = thrust::is_sorted(right->d_columns_float[right->type_index[colInd2]].begin(), right->d_columns_float[right->type_index[colInd2]].begin() + cnt_r);
if(!sorted) {
queue<string> ss(op_sel);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
unsigned int max_c = max_char(right);
unsigned int mm;
if(max_c > 8)
mm = (max_c/8) + 1;
else
mm = 1;
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(cnt_r*mm);
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v.begin());
//for(unsigned int i = 0; i < right->mColumnCount; i++) {
unsigned int i;
while(!ss.empty()) {
if (right->columnNames.find(ss.front()) != right->columnNames.end()) {
i = right->columnNames[ss.front()];
if(i != colInd2) {
if(right->type[i] == 0) {
thrust::gather(v.begin(), v.end(), right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::gather(v.begin(), v.end(), right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_float[right->type_index[i]].begin());
}
else {
str_gather(thrust::raw_pointer_cast(v.data()), cnt_r, (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[i]]);
hipMemcpy( (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), cnt_r*right->char_size[right->type_index[i]], hipMemcpyDeviceToDevice);
};
};
};
ss.pop();
};
thrust::device_free(d_tmp);
};
bool v64bit;
if(right->d_columns_int[right->type_index[colInd2]][cnt_r-1] > std::numeric_limits<unsigned int>::max())
v64bit = 1;
else
v64bit = 0;
while(!cc.empty())
cc.pop();
if (left->type[colInd1] == 2) {
left->d_columns_int.push_back(thrust::device_vector<int_type>());
//colInd1 = left->mColumnCount+1;
//left->type_index[colInd1] = left->d_columns_int.size()-1;
}
else {
cc.push(f1);
allocColumns(left, cc);
};
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
unsigned int cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
curr_segment = 10000000;
CUDPPResult result;
CUDPPHandle hash_table_handle;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
config.kInputSize = cnt_r;
config.space_usage = 1.5f;
cout << "creating table with " << cnt_r << " " << getFreeMem() << endl;
result = cudppHashTable(theCudpp, &hash_table_handle, &config);
if (result == CUDPP_SUCCESS)
cout << "hash tables created " << getFreeMem() << endl;
unsigned int tt;
if(left->maxRecs > rcount)
tt = left->maxRecs;
else
tt = rcount;
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(tt);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r,
d_r);
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(v.data()), cnt_r);
if (result == CUDPP_SUCCESS)
cout << "hash table inserted " << getFreeMem() << endl;
thrust::device_ptr<uint2> res = thrust::device_malloc<uint2>(left->maxRecs);
for (unsigned int i = 0; i < left->segCount; i++) {
cout << "segment " << i << " " << getFreeMem() << endl;
cnt_l = 0;
if (left->type[colInd1] != 2) {
copyColumns(left, lc, i, cnt_l);
}
else {
left->add_hashed_strings(f1, i, left->d_columns_int.size());
};
if(left->prm.empty()) {
//copy all records
cnt_l = left->mRecCount;
}
else {
cnt_l = left->prm_count[i];
};
if (cnt_l) {
unsigned int idx;
if(!str_join)
idx = left->type_index[colInd1];
else
idx = left->d_columns_int.size()-1;
unsigned int left_sz;
if(decimal_join) {
thrust::transform(left->d_columns_float[idx].begin(), left->d_columns_float[idx].begin() + cnt_l,
d_r, float_to_int_lower());
}
else {
thrust::copy(left->d_columns_int[idx].begin(), left->d_columns_int[idx].begin() + cnt_l,
d_r);
};
//cout << "joining " << cnt_l << " with " << cnt_r << endl;
result = cudppHashRetrieve(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(res), cnt_l);
if (result != CUDPP_SUCCESS)
cout << "Failed retrieve " << endl;
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum());
res_count = rr.y;
d_res1.resize(res_count);
d_res2.resize(res_count);
cout << "res cnt " << res_count << endl;
if(res_count) {
thrust::counting_iterator<unsigned int> begin(0);
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
join_functor1 ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
if(v64bit) {// need to check the upper 32 bits
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(d_res1.size());
if(decimal_join) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_left(left->d_columns_float[idx].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_right(right->d_columns_float[right->type_index[colInd2]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, float_upper_equal_to());
}
else {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_left(left->d_columns_int[idx].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_right(right->d_columns_int[right->type_index[colInd2]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, int_upper_equal_to());
};
unsigned int new_cnt = thrust::count(d_add, d_add+d_res1.size(), 1);
thrust::stable_partition(d_res1.begin(), d_res1.begin() + d_res2.size(), d_add, thrust::identity<unsigned int>());
thrust::stable_partition(d_res2.begin(), d_res2.end(), d_add, thrust::identity<unsigned int>());
thrust::device_free(d_add);
d_res2.resize(new_cnt);
d_res1.resize(new_cnt);
};
};
// check if the join is a multicolumn join
while(join_col_cnt) {
join_col_cnt--;
string f3 = op_value.front();
op_value.pop();
string f4 = op_value.front();
op_value.pop();
queue<string> rc;
rc.push(f3);
allocColumns(left, rc);
copyColumns(left, rc, i, cnt_l);
rc.pop();
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(d_res1.size());
if (d_res1.size() && d_res2.size()) {
unsigned int colInd3 = (left->columnNames).find(f3)->second;
unsigned int colInd4 = (right->columnNames).find(f4)->second;
if (left->type[colInd3] == 1 && right->type[colInd4] == 1) {
if(right->d_columns_float[right->type_index[colInd4]].size() == 0) {
load_queue(rc, right, 0, f4, rcount); // make sure the right-hand float column is resident on the device
}
thrust::device_ptr<int_type> d_l = thrust::device_malloc<int_type>(d_res1.size());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_left(left->d_columns_float[left->type_index[colInd3]].begin(), d_res1.begin());
thrust::transform(iter_left, iter_left+d_res1.size(), d_l, float_to_long());
thrust::device_ptr<int_type> d_r = thrust::device_malloc<int_type>(d_res1.size());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_right(right->d_columns_float[right->type_index[colInd4]].begin(), d_res2.begin());
thrust::transform(iter_right, iter_right+d_res2.size(), d_r, float_to_long());
thrust::transform(d_l, d_l+d_res1.size(), d_r, d_add, thrust::equal_to<int_type>());
}
else {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_left(left->d_columns_int[left->type_index[colInd3]].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_right(right->d_columns_int[right->type_index[colInd4]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, thrust::equal_to<int_type>());
};
unsigned int new_cnt = thrust::count(d_add, d_add+d_res1.size(), 1);
thrust::stable_partition(d_res1.begin(), d_res1.begin() + d_res2.size(), d_add, thrust::identity<unsigned int>());
thrust::stable_partition(d_res2.begin(), d_res2.end(), d_add, thrust::identity<unsigned int>());
d_res2.resize(new_cnt);
thrust::device_free(d_add);
if(!left_join) {
d_res1.resize(new_cnt);
}
else {
left_sz = d_res1.size() - d_res2.size();
};
};
};
res_count = d_res1.size();
tot_count = tot_count + res_count;
//cout << "res " << res_count << endl;
std::clock_t start5 = std::clock();
if(res_count) {
offset = c->mRecCount;
if(i == 0 && left->segCount != 1) {
c->reserve(res_count*(left->segCount+1));
//cout << "prealloced " << left->segCount+1 << endl;
};
c->resize(res_count);
queue<string> op_sel1(op_sel);
unsigned int colInd, c_colInd;
if(left->segCount == 1) {
thrust::device_free(d_r);
thrust::device_free(res);
};
while(!op_sel1.empty()) {
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
c_colInd = c->columnNames[op_sel1.front()];
//cout << "gathering " << op_sel1.front() << endl;
if(left->columnNames.find(op_sel1.front()) != left->columnNames.end()) {
// copy field's segment to device, gather it and copy to the host
unsigned int colInd = left->columnNames[op_sel1.front()];
reset_offsets();
allocColumns(left, cc);
copyColumns(left, cc, i, k);
//gather
if(left->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(left->d_columns_int[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c_colInd]].begin() + offset);
}
else if(left->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(left->d_columns_float[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c_colInd]].begin() + offset);
}
else { //strings
thrust::device_ptr<char> d_tmp = thrust::device_malloc<char>(res_count*left->char_size[left->type_index[colInd]]);
str_gather(thrust::raw_pointer_cast(d_res1.data()), res_count, (void*)left->d_columns_char[left->type_index[colInd]],
(void*) thrust::raw_pointer_cast(d_tmp), left->char_size[left->type_index[colInd]]);
hipMemcpy( (void*)&c->h_columns_char[c->type_index[c_colInd]][offset*c->char_size[c->type_index[c_colInd]]], (void*) thrust::raw_pointer_cast(d_tmp),
c->char_size[c->type_index[c_colInd]] * res_count, hipMemcpyDeviceToHost);
thrust::device_free(d_tmp);
}
left->deAllocColumnOnDevice(colInd);
}
else if(right->columnNames.find(op_sel1.front()) != right->columnNames.end()) {
colInd = right->columnNames[op_sel1.front()];
//gather
if(right->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(right->d_columns_int[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + d_res2.size(), c->h_columns_int[c->type_index[c_colInd]].begin() + offset);
if(left_join && left_sz) {
thrust::fill(c->h_columns_int[c->type_index[c_colInd]].begin() + offset + d_res2.size(),
c->h_columns_int[c->type_index[c_colInd]].begin() + offset + d_res2.size() + left_sz,
0);
};
}
else if(right->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(right->d_columns_float[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + d_res2.size(), c->h_columns_float[c->type_index[c_colInd]].begin() + offset);
if(left_join && left_sz) {
thrust::fill(c->h_columns_float[c->type_index[c_colInd]].begin() + offset + d_res2.size(),
c->h_columns_float[c->type_index[c_colInd]].begin() + offset + d_res2.size() + left_sz,
0);
};
}
else { //strings
thrust::device_ptr<char> d_tmp = thrust::device_malloc<char>(d_res2.size()*right->char_size[right->type_index[colInd]]);
str_gather(thrust::raw_pointer_cast(d_res2.data()), d_res2.size(), (void*)right->d_columns_char[right->type_index[colInd]],
(void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[colInd]]);
hipMemcpy( (void*)(c->h_columns_char[c->type_index[c_colInd]] + offset*c->char_size[c->type_index[c_colInd]]), (void*) thrust::raw_pointer_cast(d_tmp),
c->char_size[c->type_index[c_colInd]] * d_res2.size(), hipMemcpyDeviceToHost);
if(left_join && left_sz) {
memset((void*)(c->h_columns_char[c->type_index[c_colInd]] + (d_res2.size() + offset)*c->char_size[c->type_index[c_colInd]]), 0,
left_sz*c->char_size[c->type_index[c_colInd]]);
};
thrust::device_free(d_tmp);
}
}
else {
cout << "Couldn't find field " << op_sel1.front() << endl;
exit(0);
};
//cout << "gathered " << op_sel1.front() << endl;
op_sel1.pop();
};
};
};
};
d_res1.resize(0);
d_res1.shrink_to_fit();
d_res2.resize(0);
d_res2.shrink_to_fit();
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
cudppDestroyHashTable(theCudpp, hash_table_handle);
unsigned int i = 0;
while(!col_aliases.empty()) {
c->columnNames[col_aliases.front()] = i;
col_aliases.pop();
i++;
};
varNames[s] = c;
c->mRecCount = tot_count;
c->maxRecs = tot_count;
cout << "join count " << tot_count << endl;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it )
setMap[(*it).first] = s;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
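/* order_on_host: fallback sort used when the data set does not fit in GPU
memory. Columns are staged to the host segment by segment, a permutation is
built with the *_host helpers, and the permuted columns are written into the
result set `b`. */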
void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value)
{
unsigned int tot = 0;
if(!a->not_compressed) { //compressed
allocColumns(a, names);
unsigned int c = 0;
if(a->prm_count.size()) {
for(unsigned int i = 0; i < a->prm.size(); i++)
c = c + a->prm_count[i];
}
else
c = a->mRecCount;
a->mRecCount = 0;
a->resize(c);
unsigned int cnt = 0;
for(unsigned int i = 0; i < a->segCount; i++) {
copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu
if (a->mRecCount) {
a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount);
tot = tot + a->mRecCount;
};
};
}
else
tot = a->mRecCount;
b->resize(tot); //resize host arrays
a->mRecCount = tot;
unsigned int* permutation = new unsigned int[a->mRecCount];
thrust::sequence(permutation, permutation + a->mRecCount);
unsigned int maxSize = a->mRecCount;
char* temp;
unsigned int max_c = max_char(a);
if(max_c > float_size)
temp = new char[maxSize*max_c];
else
temp = new char[maxSize*float_size];
// sort on host
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation_host(a->h_columns_int[a->type_index[colInd]].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation_host(a->h_columns_float[a->type_index[colInd]].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
update_permutation_char_host(a->h_columns_char[a->type_index[colInd]], permutation, a->mRecCount, exe_value.top(), b->h_columns_char[b->type_index[colInd]], a->char_size[a->type_index[colInd]]);
};
};
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if ((a->type)[i] == 0) {
apply_permutation_host(a->h_columns_int[a->type_index[i]].data(), permutation, a->mRecCount, b->h_columns_int[b->type_index[i]].data());
}
else if ((a->type)[i] == 1)
apply_permutation_host(a->h_columns_float[a->type_index[i]].data(), permutation, a->mRecCount, b->h_columns_float[b->type_index[i]].data());
else {
apply_permutation_char_host(a->h_columns_char[a->type_index[i]], permutation, a->mRecCount, b->h_columns_char[b->type_index[i]], a->char_size[a->type_index[i]]);
};
};
delete [] temp;
delete [] permutation;
}
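/* emit_order: ORDER BY. Estimates the record size against free GPU memory;
if the set does not fit it falls back to order_on_host(), otherwise it sorts
on the device with a permutation vector and copies the ordered columns back
to the host arrays of the result set. */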
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
cout << "order: " << s << " " << f << endl;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
tp.pop();
};
queue<string> names;
for ( map<string,int>::iterator it=a->columnNames.begin() ; it != a->columnNames.end(); ++it )
names.push((*it).first);
CudaSet *b = a->copyDeviceStruct();
//lets find out if our data set fits into a GPU
size_t mem_available = getFreeMem();
size_t rec_size = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0)
rec_size = rec_size + int_size;
else if(a->type[i] == 1)
rec_size = rec_size + float_size;
else
rec_size = rec_size + a->char_size[a->type_index[i]];
};
bool fits;
if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU
fits = 0;
else fits = 1;
if(!fits) {
order_on_host(a, b, names, exe_type, exe_value);
}
else {
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
unsigned int maxSize = a->mRecCount;
void* temp;
unsigned int max_c = max_char(a);
if(max_c > float_size)
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*max_c));
else
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
unsigned int rcount;
a->mRecCount = load_queue(names, a, 1, op_vx.front(), rcount);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
unsigned int str_count = 0;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
update_permutation_char(a->d_columns_char[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp, a->char_size[a->type_index[colInd]]);
//update_permutation(a->d_columns_int[int_col_count+str_count], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
str_count++;
};
};
b->resize(a->mRecCount); //resize host arrays
b->mRecCount = a->mRecCount;
str_count = 0;
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, a->mRecCount, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, a->mRecCount, (float_type*)temp);
else {
apply_permutation_char(a->d_columns_char[a->type_index[i]], raw_ptr, a->mRecCount, (char*)temp, a->char_size[a->type_index[i]]);
str_count++;
};
};
for(unsigned int i = 0; i < a->mColumnCount; i++) {
switch(a->type[i]) {
case 0 :
thrust::copy(a->d_columns_int[a->type_index[i]].begin(), a->d_columns_int[a->type_index[i]].begin() + a->mRecCount, b->h_columns_int[b->type_index[i]].begin());
break;
case 1 :
thrust::copy(a->d_columns_float[a->type_index[i]].begin(), a->d_columns_float[a->type_index[i]].begin() + a->mRecCount, b->h_columns_float[b->type_index[i]].begin());
break;
default :
hipMemcpy(b->h_columns_char[b->type_index[i]], a->d_columns_char[a->type_index[i]], a->char_size[a->type_index[i]]*a->mRecCount, hipMemcpyDeviceToHost);
}
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
hipFree(temp);
};
varNames[s] = b;
b->segCount = 1;
b->not_compressed = 1;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
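/* emit_select: SELECT execution. Referenced columns are allocated on the
device and the source set is processed one segment at a time: string group-by
keys are hashed into extra int columns, each segment is optionally ordered and
grouped in place, select() evaluates the queued expression into `b`, and the
per-segment results are merged into `c` (add()/count_avg() combine aggregates
across segments). */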
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
//cout << "free mem " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet *b, *c;
curr_segment = 10000000;
//setSegments(a, op_vx);
//cout << "segs " << a->segCount << endl;
//exit(0);
allocColumns(a, op_vx);
unsigned int cycle_count;
if(!a->prm.empty())
cycle_count = varNames[setMap[op_value.front()]]->segCount;
else
cycle_count = a->segCount;
unsigned long long int ol_count = a->mRecCount;
unsigned int cnt;
//varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
a->oldRecCount = a->mRecCount;
b = new CudaSet(0, col_count);
bool b_set = 0, c_set = 0;
unsigned int long long tmp_size = a->mRecCount;
if(a->segCount > 1)
tmp_size = a->maxRecs;
boost::unordered_map<long long int, unsigned int> mymap; //this is where we keep the hashes of the records
vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_tmp;
for(unsigned int i = 0; i < distinct_cnt; i++) {
distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size));
distinct_val.push_back(thrust::device_vector<int_type>());
distinct_hash.push_back(thrust::device_vector<int_type>());
};
// find out how many string columns we have. Add int_type columns to store string hashes for sort/groupby ops.
stack<string> op_s = op_v2;
int_col_count = a->d_columns_int.size();
while(!op_s.empty()) {
int colInd = (a->columnNames).find(op_s.top())->second;
if (a->type[colInd] == 2) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
};
op_s.pop();
};
unsigned int s_cnt;
bool one_liner;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
reset_offsets();
op_s = op_v2;
s_cnt = 0;
while(!op_s.empty()) {
int colInd = (a->columnNames).find(op_s.top())->second;
if (a->type[colInd] == 2) {
a->d_columns_int[int_col_count + s_cnt].resize(0);
a->add_hashed_strings(op_s.top(), i, int_col_count + s_cnt);
s_cnt++;
};
op_s.pop();
};
cnt = 0;
copyColumns(a, op_vx, i, cnt);
if(a->mRecCount) {
if (ll != 0) {
order_inplace(a,op_v2,field_names,i);
a->GroupBy(op_v2, int_col_count);
};
for(unsigned int z = int_col_count; z < a->d_columns_int.size()-1; z++)
a->d_columns_int[z].resize(0);
select(op_type,op_value,op_nums, op_nums_f,a,b, distinct_tmp, one_liner);
if(!b_set) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
b_set = 1;
unsigned int old_cnt = b->mRecCount;
b->mRecCount = 0;
b->resize(varNames[setMap[op_vx.front()]]->maxRecs);
b->mRecCount = old_cnt;
};
if (!c_set) {
c = new CudaSet(0, col_count);
create_c(c,b);
c_set = 1;
};
if (ll != 0 && cycle_count > 1 ) {
add(c,b,op_v3, mymap, aliases, distinct_tmp, distinct_val, distinct_hash, a);
}
else {
//copy b to c
unsigned int c_offset = c->mRecCount;
c->resize(b->mRecCount);
for(unsigned int j=0; j < b->mColumnCount; j++) {
if (b->type[j] == 0) {
thrust::copy(b->d_columns_int[b->type_index[j]].begin(), b->d_columns_int[b->type_index[j]].begin() + b->mRecCount, c->h_columns_int[c->type_index[j]].begin() + c_offset);
}
else if (b->type[j] == 1) {
thrust::copy(b->d_columns_float[b->type_index[j]].begin(), b->d_columns_float[b->type_index[j]].begin() + b->mRecCount, c->h_columns_float[c->type_index[j]].begin() + c_offset);
}
else {
hipMemcpy((void*)(thrust::raw_pointer_cast(c->h_columns_char[c->type_index[j]] + b->char_size[b->type_index[j]]*c_offset)), (void*)thrust::raw_pointer_cast(b->d_columns_char[b->type_index[j]]),
b->char_size[b->type_index[j]] * b->mRecCount, hipMemcpyDeviceToHost);
};
};
};
};
};
a->mRecCount = ol_count;
a->mRecCount = a->oldRecCount;
a->deAllocOnDevice();
b->deAllocOnDevice();
if (ll != 0) {
count_avg(c, mymap, distinct_hash);
}
else {
if(one_liner) {
count_simple(c);
};
};
reset_offsets();
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
varNames[s] = c;
b->free();
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
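/* emit_filter: WHERE execution. For every segment zone_map_check() first
tries to decide the predicate from zone-map statistics; only segments flagged
'R' are copied to the device and run through filter(), the rest are resolved
by setPrm() without touching the column data. */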
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << " " << getFreeMem() << endl;
b = a->copyDeviceStruct();
b->name = s;
unsigned int cycle_count = 1, cnt = 0;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(a->segCount != 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
oldCount = a->mRecCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
for(unsigned int i = 0; i < cycle_count; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
cout << "MAP CHECK " << map_check << endl;
reset_offsets();
if(map_check == 'R') {
copyColumns(a, op_value, i, cnt);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
}
else {
setPrm(a,b,map_check,i);
}
};
a->mRecCount = oldCount;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
//a->free();
//varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
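/* emit_store_binary: STORE ... BINARY. For text-backed sets the source file
is (re)loaded in chunks of process_count records and every chunk is appended
to the binary column files via Store(..., 1) until the whole file has been
consumed. */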
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
total_count = 0;
total_segments = 0;
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << " " << getFreeMem() << endl;
if(a->text_source)
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
cout << "Writing a file " << endl;
a->Store(f,"", limit, 1);
cout << "Finished writing a file " << endl;
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
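/* emit_load_binary: LOAD ... BINARY. Reads the <file>.<col>.header file to
obtain the total record count, the segment count and the largest segment
size, then registers a CudaSet that pages the binary column segments in on
demand. */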
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
CudaSet *a;
unsigned int segCount, maxRecs;
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
strcat(f1,".header");
FILE* ff = fopen(f1, "rb");
if(ff == NULL) {
cout << "Couldn't open file " << f1 << endl;
exit(0);
};
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
cout << "Reading " << totalRecs << " records" << endl;
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
}
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->not_compressed = 1;
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = 0;
distinct_cnt = 0;
reset_offsets();
}
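/* main: parses the script twice. The first pass (scan_state == 0) only
records in `stat` the last statement that references each variable so that
intermediate results can be freed as early as possible; the second pass
re-opens the script and actually executes the statements on the GPU. */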
int main(int ac, char **av)
{
extern FILE *yyin;
//hipDeviceProp_t deviceProp;
//hipGetDeviceProperties(&deviceProp, 0);
//if (!deviceProp.canMapHostMemory)
// cout << "Device 0 cannot map host memory" << endl;
//hipSetDeviceFlags(hipDeviceMapHost);
cudppCreate(&theCudpp);
// widen before multiplying so the 64-bit seed components cannot overflow int
long long int r30 = (long long int)RAND_MAX*rand()+rand();
long long int s30 = (long long int)RAND_MAX*rand()+rand();
long long int t4 = rand() & 0xf;
hash_seed = (r30 << 34) + (s30 << 4) + t4;
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
if(alloced_sz)
hipFree(alloced_tmp);
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
| 2fcd3290c8c842f22f68de4402dc6307221eb434.cu |
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "c:\\GnuWin32\\bin\\alenka\\bison.y"
#include "lex.yy.c"
#include "cm.h"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_distinct();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1, int grp);
void emit_join_tab(char *s, bool left);
void emit_distinct();
/* Line 189 of yacc.c */
#line 124 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
DISTINCT = 271,
REGEXP = 272,
LIKE = 273,
IS = 274,
IN = 275,
NOT = 276,
BETWEEN = 277,
COMPARISON = 278,
SHIFT = 279,
MOD = 280,
UMINUS = 281,
LOAD = 282,
STREAM = 283,
FILTER = 284,
BY = 285,
JOIN = 286,
STORE = 287,
INTO = 288,
GROUP = 289,
FROM = 290,
SELECT = 291,
AS = 292,
ORDER = 293,
ASC = 294,
DESC = 295,
COUNT = 296,
USING = 297,
SUM = 298,
AVG = 299,
MIN = 300,
MAX = 301,
LIMIT = 302,
ON = 303,
BINARY = 304,
LEFT = 305
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "c:\\GnuWin32\\bin\\alenka\\bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 219 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 231 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
   N elements.  */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 457
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 68
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 64
/* YYNSTATES -- Number of states.  */
#define YYNSTATES 161
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 305
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 21, 2, 2, 2, 32, 26, 2,
61, 62, 30, 28, 67, 29, 63, 31, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 66, 60,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 34, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 64, 25, 65, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 22, 23, 24, 27,
33, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59
};
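/* yytranslate maps raw yylex return values onto the compact internal symbol
   numbers that index the parse tables: literal operator characters map via
   their character codes (e.g. ';' -> 60, '(' -> 61), while the named tokens
   258..305 map onto symbols 3..59. */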
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 65, 75, 82, 84, 88, 90, 92, 94, 96,
98, 100, 110, 117, 120, 123, 128, 133, 138, 143,
148, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 195, 198, 201, 205, 211, 215, 219, 224,
225, 229, 233, 239, 241, 245, 247, 251, 252, 254,
257, 262, 268, 274, 275
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
69, 0, -1, 70, 60, -1, 69, 70, 60, -1,
71, -1, 4, 11, 45, 74, 44, 4, 73, -1,
4, 11, 36, 3, 51, 61, 3, 62, 46, 61,
75, 62, -1, 4, 11, 36, 3, 58, 46, 61,
75, 62, -1, 4, 11, 38, 4, 78, -1, 4,
11, 47, 4, 39, 77, -1, 4, 11, 45, 74,
44, 4, 79, 73, -1, 41, 4, 42, 3, 51,
61, 3, 62, 80, -1, 41, 4, 42, 3, 80,
58, -1, 4, -1, 4, 63, 4, -1, 10, -1,
5, -1, 6, -1, 9, -1, 7, -1, 8, -1,
4, 64, 6, 65, 66, 4, 61, 6, 62, -1,
4, 64, 6, 65, 66, 4, -1, 4, 48, -1,
4, 49, -1, 50, 61, 72, 62, -1, 52, 61,
72, 62, -1, 53, 61, 72, 62, -1, 54, 61,
72, 62, -1, 55, 61, 72, 62, -1, 16, 72,
-1, 72, 28, 72, -1, 72, 29, 72, -1, 72,
30, 72, -1, 72, 31, 72, -1, 72, 32, 72,
-1, 72, 33, 72, -1, 72, 15, 72, -1, 72,
12, 72, -1, 72, 13, 72, -1, 72, 14, 72,
-1, 72, 27, 72, -1, 22, 72, -1, 21, 72,
-1, 72, 24, 72, -1, 72, 24, 61, 71, 62,
-1, 61, 72, 62, -1, 72, 19, 8, -1, 72,
19, 22, 8, -1, -1, 43, 39, 76, -1, 72,
46, 4, -1, 74, 67, 72, 46, 4, -1, 72,
-1, 75, 67, 72, -1, 72, -1, 72, 67, 76,
-1, -1, 76, -1, 39, 72, -1, 40, 4, 57,
72, -1, 59, 40, 4, 57, 72, -1, 40, 4,
57, 72, 79, -1, -1, 56, 6, -1
};
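/* Example of reading this table: rule 5 starts at yyrhs[yyprhs[5]] = yyrhs[12]
   and spans yyr2[5] = 7 symbols: 4 11 45 74 44 4 73, i.e.
   select_stmt -> NAME ASSIGN SELECT expr_list FROM NAME opt_group_list,
   which matches the emit_select() action for case 5 in the reduce switch
   below. */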
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 140, 140, 141, 145, 148, 150, 152, 154, 156,
158, 160, 162, 167, 168, 169, 170, 171, 172, 173,
174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
184, 188, 189, 190, 191, 192, 193, 195, 196, 197,
198, 199, 200, 201, 202, 204, 205, 209, 210, 213,
216, 220, 221, 225, 226, 230, 231, 234, 236, 239,
242, 243, 244, 246, 249
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "DISTINCT", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT",
"BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'",
"'/'", "'%'", "MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY",
"JOIN", "STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC",
"DESC", "COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON",
"BINARY", "LEFT", "';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','",
"$accept", "stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 33, 276, 277, 278, 124, 38, 279, 43, 45,
42, 47, 37, 280, 94, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
59, 40, 41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 68, 69, 69, 70, 71, 71, 71, 71, 71,
71, 71, 71, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
72, 72, 72, 72, 72, 72, 72, 72, 72, 73,
73, 74, 74, 75, 75, 76, 76, 77, 77, 78,
79, 79, 79, 80, 80
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
8, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 2, 2, 3, 5, 3, 3, 4, 0,
3, 3, 5, 1, 3, 1, 3, 0, 1, 2,
4, 5, 5, 0, 2
};
/* YYDEFACT[STATE-NUM] -- Default rule to reduce with in state
   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
   means the default is an error.  */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 63, 0,
0, 0, 8, 23, 24, 0, 0, 30, 43, 42,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 57, 0, 0, 0, 0, 0, 59, 14,
0, 0, 0, 0, 0, 0, 46, 38, 39, 40,
37, 47, 0, 0, 44, 41, 31, 32, 33, 34,
35, 36, 51, 49, 0, 55, 58, 9, 0, 64,
12, 0, 0, 0, 25, 26, 27, 28, 29, 48,
13, 0, 0, 0, 0, 5, 49, 0, 0, 0,
0, 53, 0, 0, 45, 0, 0, 0, 10, 52,
56, 63, 0, 7, 0, 22, 0, 50, 0, 11,
0, 54, 0, 60, 0, 0, 0, 62, 61, 6,
21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 105, 125, 36, 132, 106, 107,
42, 126, 75
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -126
static const yytype_int16 yypact[] =
{
6, -3, 5, 1, -44, -126, 59, -18, -126, -34,
-126, 53, 64, 113, 65, 54, -126, -39, 31, 37,
-126, -126, -126, -126, -126, -126, 113, 113, 113, 33,
41, 67, 71, 75, 113, 337, -42, 76, 27, 77,
78, 113, -126, -126, -126, 122, 133, 401, 411, 411,
113, 113, 113, 113, 113, 182, 113, 113, 113, 113,
-2, 138, 113, 113, 113, 113, 113, 113, 113, 136,
145, 113, 113, 89, 146, 93, 150, 94, 386, -126,
91, 204, 226, 248, 270, 292, -126, 386, 3, 154,
401, -126, 149, 55, 418, 424, 81, 81, -126, -126,
-126, -126, -126, -36, 363, 60, -126, -126, 155, -126,
-126, 99, 113, 96, -126, -126, -126, -126, -126, -126,
18, 102, 166, 132, 135, -126, 129, 172, 113, 115,
134, 386, 36, 175, -126, 141, 113, 185, -126, -126,
-126, 144, 142, -126, 113, 143, 113, -126, 148, -126,
113, 386, 196, 315, 113, 63, 158, -126, 386, -126,
-126
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-126, -126, 205, 114, -13, 95, -126, 72, -125, -126,
-126, 73, 83
};
/* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
   positive, shift that token.  If negative, reduce the rule whose
   number is the opposite.  If zero, do what YYDEFACT says.
   If YYTABLE_NINF, syntax error.  */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
35, 8, 70, 140, 122, 1, 91, 123, 6, 7,
1, 147, 39, 47, 48, 49, 10, 58, 59, 40,
92, 55, 60, 124, 15, 71, 16, 61, 78, 6,
62, 63, 64, 65, 66, 67, 68, 81, 82, 83,
84, 85, 2, 87, 88, 89, 90, 2, 94, 95,
96, 97, 98, 99, 100, 101, 17, 38, 104, 120,
20, 21, 22, 23, 24, 25, 43, 44, 18, 37,
41, 26, 56, 57, 58, 59, 27, 28, 73, 60,
55, 45, 46, 74, 61, 43, 44, 62, 63, 64,
65, 66, 67, 68, 50, 11, 2, 12, 143, 131,
45, 46, 51, 144, 13, 29, 14, 30, 31, 32,
33, 65, 66, 67, 68, 72, 34, 19, 20, 21,
22, 23, 24, 25, 77, 159, 79, 128, 52, 26,
144, 151, 53, 153, 27, 28, 54, 131, 76, 80,
102, 158, 19, 20, 21, 22, 23, 24, 25, 103,
108, 110, 109, 111, 26, 112, 113, 119, 129, 27,
28, 130, 133, 29, 134, 30, 31, 32, 33, 59,
135, 136, 123, 60, 34, 137, 139, 141, 61, 145,
142, 62, 63, 64, 65, 66, 67, 68, 29, 148,
30, 31, 32, 33, 56, 57, 58, 59, 146, 93,
74, 60, 156, 150, 152, 154, 61, 121, 9, 62,
63, 64, 65, 66, 67, 68, 56, 57, 58, 59,
160, 138, 155, 60, 149, 0, 157, 0, 61, 0,
0, 62, 63, 64, 65, 66, 67, 68, 56, 57,
58, 59, 0, 0, 86, 60, 0, 0, 0, 0,
61, 0, 0, 62, 63, 64, 65, 66, 67, 68,
56, 57, 58, 59, 0, 0, 114, 60, 0, 0,
0, 0, 61, 0, 0, 62, 63, 64, 65, 66,
67, 68, 56, 57, 58, 59, 0, 0, 115, 60,
0, 0, 0, 0, 61, 0, 0, 62, 63, 64,
65, 66, 67, 68, 56, 57, 58, 59, 0, 0,
116, 60, 0, 0, 0, 0, 61, 0, 0, 62,
63, 64, 65, 66, 67, 68, 0, 56, 57, 58,
59, 0, 117, 0, 60, 0, 0, 0, 0, 61,
0, 0, 62, 63, 64, 65, 66, 67, 68, 56,
57, 58, 59, 0, 118, 122, 60, 0, 0, 0,
0, 61, 0, 0, 62, 63, 64, 65, 66, 67,
68, 0, 0, 0, 124, 56, 57, 58, 59, 0,
0, 0, 60, 69, 0, 0, 0, 61, 0, 0,
62, 63, 64, 65, 66, 67, 68, 0, 56, 57,
58, 59, 0, 0, 0, 60, 0, 0, 0, 127,
61, 0, 0, 62, 63, 64, 65, 66, 67, 68,
60, 0, 0, 0, 0, 61, 0, 0, 62, 63,
64, 65, 66, 67, 68, 61, 0, 0, 62, 63,
64, 65, 66, 67, 68, 62, 63, 64, 65, 66,
67, 68, 63, 64, 65, 66, 67, 68
};
static const yytype_int16 yycheck[] =
{
13, 0, 44, 128, 40, 4, 8, 43, 11, 4,
4, 136, 51, 26, 27, 28, 60, 14, 15, 58,
22, 34, 19, 59, 42, 67, 60, 24, 41, 11,
27, 28, 29, 30, 31, 32, 33, 50, 51, 52,
53, 54, 41, 56, 57, 58, 59, 41, 61, 62,
63, 64, 65, 66, 67, 68, 3, 3, 71, 4,
5, 6, 7, 8, 9, 10, 48, 49, 4, 4,
39, 16, 12, 13, 14, 15, 21, 22, 51, 19,
93, 63, 64, 56, 24, 48, 49, 27, 28, 29,
30, 31, 32, 33, 61, 36, 41, 38, 62, 112,
63, 64, 61, 67, 45, 50, 47, 52, 53, 54,
55, 30, 31, 32, 33, 39, 61, 4, 5, 6,
7, 8, 9, 10, 46, 62, 4, 67, 61, 16,
67, 144, 61, 146, 21, 22, 61, 150, 61, 6,
4, 154, 4, 5, 6, 7, 8, 9, 10, 4,
61, 58, 6, 3, 16, 61, 65, 8, 3, 21,
22, 62, 66, 50, 62, 52, 53, 54, 55, 15,
4, 39, 43, 19, 61, 40, 4, 62, 24, 4,
46, 27, 28, 29, 30, 31, 32, 33, 50, 4,
52, 53, 54, 55, 12, 13, 14, 15, 57, 61,
56, 19, 6, 61, 61, 57, 24, 93, 3, 27,
28, 29, 30, 31, 32, 33, 12, 13, 14, 15,
62, 126, 150, 19, 141, -1, 153, -1, 24, -1,
-1, 27, 28, 29, 30, 31, 32, 33, 12, 13,
14, 15, -1, -1, 62, 19, -1, -1, -1, -1,
24, -1, -1, 27, 28, 29, 30, 31, 32, 33,
12, 13, 14, 15, -1, -1, 62, 19, -1, -1,
-1, -1, 24, -1, -1, 27, 28, 29, 30, 31,
32, 33, 12, 13, 14, 15, -1, -1, 62, 19,
-1, -1, -1, -1, 24, -1, -1, 27, 28, 29,
30, 31, 32, 33, 12, 13, 14, 15, -1, -1,
62, 19, -1, -1, -1, -1, 24, -1, -1, 27,
28, 29, 30, 31, 32, 33, -1, 12, 13, 14,
15, -1, 62, -1, 19, -1, -1, -1, -1, 24,
-1, -1, 27, 28, 29, 30, 31, 32, 33, 12,
13, 14, 15, -1, 62, 40, 19, -1, -1, -1,
-1, 24, -1, -1, 27, 28, 29, 30, 31, 32,
33, -1, -1, -1, 59, 12, 13, 14, 15, -1,
-1, -1, 19, 46, -1, -1, -1, 24, -1, -1,
27, 28, 29, 30, 31, 32, 33, -1, 12, 13,
14, 15, -1, -1, -1, 19, -1, -1, -1, 46,
24, -1, -1, 27, 28, 29, 30, 31, 32, 33,
19, -1, -1, -1, -1, 24, -1, -1, 27, 28,
29, 30, 31, 32, 33, 24, -1, -1, 27, 28,
29, 30, 31, 32, 33, 27, 28, 29, 30, 31,
32, 33, 28, 29, 30, 31, 32, 33
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 41, 69, 70, 71, 11, 4, 0, 70,
60, 36, 38, 45, 47, 42, 60, 3, 4, 4,
5, 6, 7, 8, 9, 10, 16, 21, 22, 50,
52, 53, 54, 55, 61, 72, 74, 4, 3, 51,
58, 39, 78, 48, 49, 63, 64, 72, 72, 72,
61, 61, 61, 61, 61, 72, 12, 13, 14, 15,
19, 24, 27, 28, 29, 30, 31, 32, 33, 46,
44, 67, 39, 51, 56, 80, 61, 46, 72, 4,
6, 72, 72, 72, 72, 72, 62, 72, 72, 72,
72, 8, 22, 61, 72, 72, 72, 72, 72, 72,
72, 72, 4, 4, 72, 72, 76, 77, 61, 6,
58, 3, 61, 65, 62, 62, 62, 62, 62, 8,
4, 71, 40, 43, 59, 73, 79, 46, 67, 3,
62, 72, 75, 66, 62, 4, 39, 40, 73, 4,
76, 62, 46, 62, 67, 4, 57, 76, 4, 80,
61, 72, 61, 72, 57, 75, 6, 79, 72, 62,
62
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
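/* Locations are compiled out in this parser (YYLSP_NEEDED is 0 above), so
   YYLLOC_DEFAULT and YY_LOCATION_PRINT are effectively unused here. */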
/* YY_LOCATION_PRINT -- Print the location on the stream.
   This macro was not mandated originally: define it only when we know
   it will not break user code, i.e. when these are the location fields
   we expect.  */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
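      /* Each case below corresponds to one grammar rule (numbered as in the
         yyr1/yyr2/yyrline tables above) and simply forwards the rule's
         semantic values to the emit_* helpers defined after the grammar. */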
case 4:
/* Line 1455 of yacc.c */
#line 145 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 149 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 151 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 153 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 155 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 157 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 159 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_join((yyvsp[(1) - (8)].strval),(yyvsp[(6) - (8)].strval),(yyvsp[(7) - (8)].intval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 161 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 163 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 167 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 168 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 169 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 170 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 171 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 172 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 173 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 174 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 175 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 176 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 177 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 178 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 179 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 180 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 181 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 182 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 183 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_distinct(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 188 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_add(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 189 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_minus(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 190 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_mul(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 191 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_div(); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 192 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 193 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("MOD"); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 195 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_and(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 196 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_eq(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 197 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_or(); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 198 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("XOR"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 199 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 200 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 201 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("NOT"); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 202 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 204 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{emit("EXPR");;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 209 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 210 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 213 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 51:
/* Line 1455 of yacc.c */
#line 220 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 225 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 230 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 231 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 57:
/* Line 1455 of yacc.c */
#line 234 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 239 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit("FILTER BY"); ;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 242 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval), 0);;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 243 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(3) - (5)].strval), 1);;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval), 0); ;}
break;
case 63:
/* Line 1455 of yacc.c */
#line 246 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 64:
/* Line 1455 of yacc.c */
#line 249 "c:\\GnuWin32\\bin\\alenka\\bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2045 "c:\\GnuWin32\\bin\\alenka\\bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 252 "c:\\GnuWin32\\bin\\alenka\\bison.y"
#include "filter.h"
#include "select.h"
#include "merge.h"
#include "zone_map.h"
#include "atof.h"
#include "sorts.cu"
#include <limits>
#include "cudpp_src_2.0/include/cudpp_hash.h"
size_t int_size = sizeof(int_type);
size_t float_size = sizeof(float_type);
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
unsigned int distinct_cnt = 0;
int join_col_cnt = 0;
stack<string> op_join;
bool left_join;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
unsigned int int_col_count;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
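// The emit_* functions below run in two passes, selected by scan_state: the
// first pass only records, in the stat map, which statement last references
// each variable name, while the execution pass performs the actual work on
// CudaSets.  The op_type/op_value/op_nums* queues they push onto are assumed
// to be declared in cm.h (included above); they accumulate the parsed
// operations for the statement currently being reduced.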
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the surrounding quotes from the string literal
string sss(str,1, strlen(str)-2);
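// e.g. a 5-character quoted token yields the 3-character payload between the quotes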
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
join_col_cnt++;
}
void emit_eq()
{
op_type.push("JOIN");
}
void emit_distinct()
{
op_type.push("DISTINCT");
distinct_cnt++;
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
}
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s, bool left)
{
op_join.push(s);
left_join = left;
};
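// order_inplace sorts a CudaSet on the GPU without copying it: it builds an
// identity permutation, refines it with one update_permutation() pass per key
// column on the exe_type stack (assuming update_permutation keeps the sort
// stable across keys), and then gathers every column in field_names through
// the final permutation, using temp as a scratch buffer sized for the widest
// column.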
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
//std::clock_t start1 = std::clock();
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest record count among the data sources referenced by field_names,
// so the scratch buffer can hold any of their columns
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
unsigned int max_c = max_char(a, field_names);
//cout << "max_c " << max_c << endl;
if(max_c > float_size)
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*max_c));
else
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
unsigned int str_count = 0;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if (a->type[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if (a->type[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
// use int col int_col_count
update_permutation(a->d_columns_int[int_col_count+str_count], raw_ptr, sz, "ASC", (int_type*)temp);
str_count++;
};
};
str_count = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if (a->type[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if (a->type[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
apply_permutation_char(a->d_columns_char[a->type_index[i]], raw_ptr, sz, (char*)temp, a->char_size[a->type_index[i]]);
apply_permutation(a->d_columns_int[int_col_count + str_count], raw_ptr, sz, (int_type*)temp);
str_count++;
};
};
cudaFree(temp);
thrust::device_free(permutation);
}
int hh = 0;
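// emit_join handles NAME := ... JOIN ... statements: it pops the right-hand
// table recorded by emit_join_tab, and in the execution pass loads the
// right-side columns it needs, hashes string join keys into integer columns,
// sorts the right join column (reordering the other right columns with the
// same permutation) and builds a CUDPP multivalue hash table over it; the
// scratch buffer d_r allocated below is sized for the larger of one left
// segment and the whole right side, so the left table can then be probed
// against that table.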
void emit_join(char *s, char *j1, int grp)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;
//cout << "join col count " << join_col_cnt << endl;
queue<string> op_v1(op_value);
while(op_v1.size() ) {
op_v1.pop();
grp++;
};
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right,left,0,op_sel, op_sel_as);
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1, colInd2;
string tmpstr;
if (left->columnNames.find(f1) != left->columnNames.end()) {
colInd1 = (left->columnNames).find(f1)->second;
if (right->columnNames.find(f2) != right->columnNames.end()) {
colInd2 = (right->columnNames).find(f2)->second;
}
else {
cout << "Couldn't find column " << f2 << endl;
exit(0);
};
}
else if (right->columnNames.find(f1) != right->columnNames.end()) {
colInd2 = (right->columnNames).find(f1)->second;
tmpstr = f1;
f1 = f2;
if (left->columnNames.find(f2) != left->columnNames.end()) {
colInd1 = (left->columnNames).find(f2)->second;
f2 = tmpstr;
}
else {
cout << "Couldn't find column " << f2 << endl;
exit(0);
};
}
else {
cout << "Couldn't find column " << f1 << endl;
exit(0);
};
if (!((left->type[colInd1] == 0 && right->type[colInd2] == 0) || (left->type[colInd1] == 2 && right->type[colInd2] == 2)
|| (left->type[colInd1] == 1 && right->type[colInd2] == 1 && left->decimal[colInd1] && right->decimal[colInd2]))) {
cout << "Joins on floats are not supported " << endl;
exit(0);
};
bool decimal_join = 0;
if (left->type[colInd1] == 1 && right->type[colInd2] == 1)
decimal_join = 1;
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
bool str_join = 0;
// if the join is on strings, add hashed integer key columns to the left and right tables
// and redirect colInd1/colInd2 to them
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = 0; i < right->segCount; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
};
// need to allocate all right columns
queue<string> cc;
unsigned int rcount;
curr_segment = 10000000;
queue<string> op_vd(op_value);
queue<string> op_alt(op_sel);
unsigned int jc = join_col_cnt;
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
unsigned int cnt_r = load_queue(op_alt, right, str_join, f2, rcount);
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
//here we need to make sure that right column is ordered. If not then we order it and keep the permutation
bool sorted;
if(!decimal_join)
sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
else
sorted = thrust::is_sorted(right->d_columns_float[right->type_index[colInd2]].begin(), right->d_columns_float[right->type_index[colInd2]].begin() + cnt_r);
if(!sorted) {
queue<string> ss(op_sel);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
unsigned int max_c = max_char(right);
unsigned int mm;
if(max_c > 8)
mm = (max_c/8) + 1;
else
mm = 1;
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(cnt_r*mm);
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v.begin());
//for(unsigned int i = 0; i < right->mColumnCount; i++) {
unsigned int i;
while(!ss.empty()) {
if (right->columnNames.find(ss.front()) != right->columnNames.end()) {
i = right->columnNames[ss.front()];
if(i != colInd2) {
if(right->type[i] == 0) {
thrust::gather(v.begin(), v.end(), right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::gather(v.begin(), v.end(), right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_float[right->type_index[i]].begin());
}
else {
str_gather(thrust::raw_pointer_cast(v.data()), cnt_r, (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[i]]);
cudaMemcpy( (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), cnt_r*right->char_size[right->type_index[i]], cudaMemcpyDeviceToDevice);
};
};
};
ss.pop();
};
thrust::device_free(d_tmp);
};
bool v64bit;
if(right->d_columns_int[right->type_index[colInd2]][cnt_r-1] > std::numeric_limits<unsigned int>::max())
v64bit = 1;
else
v64bit = 0;
while(!cc.empty())
cc.pop();
if (left->type[colInd1] == 2) {
left->d_columns_int.push_back(thrust::device_vector<int_type>());
//colInd1 = left->mColumnCount+1;
//left->type_index[colInd1] = left->d_columns_int.size()-1;
}
else {
cc.push(f1);
allocColumns(left, cc);
};
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
unsigned int cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
curr_segment = 10000000;
CUDPPResult result;
CUDPPHandle hash_table_handle;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
config.kInputSize = cnt_r;
config.space_usage = 1.5f;
cout << "creating table with " << cnt_r << " " << getFreeMem() << endl;
result = cudppHashTable(theCudpp, &hash_table_handle, &config);
if (result == CUDPP_SUCCESS)
cout << "hash tables created " << getFreeMem() << endl;
unsigned int tt;
if(left->maxRecs > rcount)
tt = left->maxRecs;
else
tt = rcount;
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(tt);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r,
d_r);
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(v.data()), cnt_r);
if (result == CUDPP_SUCCESS)
cout << "hash table inserted " << getFreeMem() << endl;
thrust::device_ptr<uint2> res = thrust::device_malloc<uint2>(left->maxRecs);
for (unsigned int i = 0; i < left->segCount; i++) {
cout << "segment " << i << " " << getFreeMem() << endl;
cnt_l = 0;
if (left->type[colInd1] != 2) {
copyColumns(left, lc, i, cnt_l);
}
else {
left->add_hashed_strings(f1, i, left->d_columns_int.size());
};
if(left->prm.empty()) {
//copy all records
cnt_l = left->mRecCount;
}
else {
cnt_l = left->prm_count[i];
};
if (cnt_l) {
unsigned int idx;
if(!str_join)
idx = left->type_index[colInd1];
else
idx = left->d_columns_int.size()-1;
unsigned int left_sz = 0; //only assigned on the multi-column left-join path, so initialize it
if(decimal_join) {
thrust::transform(left->d_columns_float[idx].begin(), left->d_columns_float[idx].begin() + cnt_l,
d_r, float_to_int_lower());
}
else {
thrust::copy(left->d_columns_int[idx].begin(), left->d_columns_int[idx].begin() + cnt_l,
d_r);
};
//cout << "joining " << cnt_l << " with " << cnt_r << endl;
result = cudppHashRetrieve(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(res), cnt_l);
if (result != CUDPP_SUCCESS)
cout << "Failed retrieve " << endl;
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum());
res_count = rr.y;
d_res1.resize(res_count);
d_res2.resize(res_count);
cout << "res cnt " << res_count << endl;
if(res_count) {
thrust::counting_iterator<unsigned int> begin(0);
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
join_functor1 ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
if(v64bit) {// need to check the upper 32 bits
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(d_res1.size());
if(decimal_join) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_left(left->d_columns_float[idx].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_right(right->d_columns_float[right->type_index[colInd2]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, float_upper_equal_to());
}
else {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_left(left->d_columns_int[idx].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_right(right->d_columns_int[right->type_index[colInd2]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, int_upper_equal_to());
};
unsigned int new_cnt = thrust::count(d_add, d_add+d_res1.size(), 1);
thrust::stable_partition(d_res1.begin(), d_res1.begin() + d_res2.size(), d_add, thrust::identity<unsigned int>());
thrust::stable_partition(d_res2.begin(), d_res2.end(), d_add, thrust::identity<unsigned int>());
thrust::device_free(d_add);
d_res2.resize(new_cnt);
d_res1.resize(new_cnt);
};
};
// check if the join is a multicolumn join
while(join_col_cnt) {
join_col_cnt--;
string f3 = op_value.front();
op_value.pop();
string f4 = op_value.front();
op_value.pop();
queue<string> rc;
rc.push(f3);
allocColumns(left, rc);
copyColumns(left, rc, i, cnt_l);
rc.pop();
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(d_res1.size());
if (d_res1.size() && d_res2.size()) {
unsigned int colInd3 = (left->columnNames).find(f3)->second;
unsigned int colInd4 = (right->columnNames).find(f4)->second;
if (left->type[colInd3] == 1 && right->type[colInd4] == 1) {
if(right->d_columns_float[right->type_index[colInd4]].size() == 0)
load_queue(rc, right, 0, f4, rcount);
thrust::device_ptr<int_type> d_l = thrust::device_malloc<int_type>(d_res1.size());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_left(left->d_columns_float[left->type_index[colInd3]].begin(), d_res1.begin());
thrust::transform(iter_left, iter_left+d_res1.size(), d_l, float_to_long());
thrust::device_ptr<int_type> d_r = thrust::device_malloc<int_type>(d_res1.size());
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter_right(right->d_columns_float[right->type_index[colInd4]].begin(), d_res2.begin());
thrust::transform(iter_right, iter_right+d_res2.size(), d_r, float_to_long());
thrust::transform(d_l, d_l+d_res1.size(), d_r, d_add, thrust::equal_to<int_type>());
}
else {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_left(left->d_columns_int[left->type_index[colInd3]].begin(), d_res1.begin());
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter_right(right->d_columns_int[right->type_index[colInd4]].begin(), d_res2.begin());
thrust::transform(iter_left, iter_left+d_res2.size(), iter_right, d_add, thrust::equal_to<int_type>());
};
unsigned int new_cnt = thrust::count(d_add, d_add+d_res1.size(), 1);
thrust::stable_partition(d_res1.begin(), d_res1.begin() + d_res2.size(), d_add, thrust::identity<unsigned int>());
thrust::stable_partition(d_res2.begin(), d_res2.end(), d_add, thrust::identity<unsigned int>());
d_res2.resize(new_cnt);
thrust::device_free(d_add);
if(!left_join) {
d_res1.resize(new_cnt);
}
else {
left_sz = d_res1.size() - d_res2.size();
};
};
};
res_count = d_res1.size();
tot_count = tot_count + res_count;
//cout << "res " << res_count << endl;
std::clock_t start5 = std::clock();
if(res_count) {
offset = c->mRecCount;
if(i == 0 && left->segCount != 1) {
c->reserve(res_count*(left->segCount+1));
//cout << "prealloced " << left->segCount+1 << endl;
};
c->resize(res_count);
queue<string> op_sel1(op_sel);
unsigned int colInd, c_colInd;
if(left->segCount == 1) {
thrust::device_free(d_r);
thrust::device_free(res);
};
while(!op_sel1.empty()) {
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
c_colInd = c->columnNames[op_sel1.front()];
//cout << "gathering " << op_sel1.front() << endl;
if(left->columnNames.find(op_sel1.front()) != left->columnNames.end()) {
// copy field's segment to device, gather it and copy to the host
unsigned int colInd = left->columnNames[op_sel1.front()];
reset_offsets();
allocColumns(left, cc);
copyColumns(left, cc, i, k);
//gather
if(left->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(left->d_columns_int[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c_colInd]].begin() + offset);
}
else if(left->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(left->d_columns_float[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c_colInd]].begin() + offset);
}
else { //strings
thrust::device_ptr<char> d_tmp = thrust::device_malloc<char>(res_count*left->char_size[left->type_index[colInd]]);
str_gather(thrust::raw_pointer_cast(d_res1.data()), res_count, (void*)left->d_columns_char[left->type_index[colInd]],
(void*) thrust::raw_pointer_cast(d_tmp), left->char_size[left->type_index[colInd]]);
cudaMemcpy( (void*)&c->h_columns_char[c->type_index[c_colInd]][offset*c->char_size[c->type_index[c_colInd]]], (void*) thrust::raw_pointer_cast(d_tmp),
c->char_size[c->type_index[c_colInd]] * res_count, cudaMemcpyDeviceToHost);
thrust::device_free(d_tmp);
}
left->deAllocColumnOnDevice(colInd);
}
else if(right->columnNames.find(op_sel1.front()) != right->columnNames.end()) {
colInd = right->columnNames[op_sel1.front()];
//gather
if(right->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(right->d_columns_int[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + d_res2.size(), c->h_columns_int[c->type_index[c_colInd]].begin() + offset);
if(left_join && left_sz) {
thrust::fill(c->h_columns_int[c->type_index[c_colInd]].begin() + offset + d_res2.size(),
c->h_columns_int[c->type_index[c_colInd]].begin() + offset + d_res2.size() + left_sz,
0);
};
}
else if(right->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(right->d_columns_float[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + d_res2.size(), c->h_columns_float[c->type_index[c_colInd]].begin() + offset);
if(left_join && left_sz) {
thrust::fill(c->h_columns_float[c->type_index[c_colInd]].begin() + offset + d_res2.size(),
c->h_columns_float[c->type_index[c_colInd]].begin() + offset + d_res2.size() + left_sz,
0);
};
}
else { //strings
thrust::device_ptr<char> d_tmp = thrust::device_malloc<char>(d_res2.size()*right->char_size[right->type_index[colInd]]);
str_gather(thrust::raw_pointer_cast(d_res2.data()), d_res2.size(), (void*)right->d_columns_char[right->type_index[colInd]],
(void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[colInd]]);
cudaMemcpy( (void*)(c->h_columns_char[c->type_index[c_colInd]] + offset*c->char_size[c->type_index[c_colInd]]), (void*) thrust::raw_pointer_cast(d_tmp),
c->char_size[c->type_index[c_colInd]] * d_res2.size(), cudaMemcpyDeviceToHost);
if(left_join && left_sz) {
memset((void*)(c->h_columns_char[c->type_index[c_colInd]] + (d_res2.size() + offset)*c->char_size[c->type_index[c_colInd]]), 0,
left_sz*c->char_size[c->type_index[c_colInd]]);
};
thrust::device_free(d_tmp);
}
}
else {
cout << "Couldn't find field " << op_sel1.front() << endl;
exit(0);
};
//cout << "gathered " << op_sel1.front() << endl;
op_sel1.pop();
};
};
};
};
d_res1.resize(0);
d_res1.shrink_to_fit();
d_res2.resize(0);
d_res2.shrink_to_fit();
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
cudppDestroyHashTable(theCudpp, hash_table_handle);
unsigned int i = 0;
while(!col_aliases.empty()) {
c->columnNames[col_aliases.front()] = i;
col_aliases.pop();
i++;
};
varNames[s] = c;
c->mRecCount = tot_count;
c->maxRecs = tot_count;
cout << "join count " << tot_count << endl;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it )
setMap[(*it).first] = s;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
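// order_on_host: fallback ORDER BY path for data sets that do not fit in GPU memory.
// Compressed segments are first decompressed and copied to the host, then a permutation is
// built key by key with update_permutation_host and applied to every column of the copy b.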
void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value)
{
unsigned int tot = 0;
if(!a->not_compressed) { //compressed
allocColumns(a, names);
unsigned int c = 0;
if(a->prm_count.size()) {
for(unsigned int i = 0; i < a->prm.size(); i++)
c = c + a->prm_count[i];
}
else
c = a->mRecCount;
a->mRecCount = 0;
a->resize(c);
unsigned int cnt = 0;
for(unsigned int i = 0; i < a->segCount; i++) {
copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu
if (a->mRecCount) {
a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount);
tot = tot + a->mRecCount;
};
};
}
else
tot = a->mRecCount;
b->resize(tot); //resize host arrays
a->mRecCount = tot;
unsigned int* permutation = new unsigned int[a->mRecCount];
thrust::sequence(permutation, permutation + a->mRecCount);
unsigned int maxSize = a->mRecCount;
char* temp;
unsigned int max_c = max_char(a);
if(max_c > float_size)
temp = new char[maxSize*max_c];
else
temp = new char[maxSize*float_size];
// sort on host
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation_host(a->h_columns_int[a->type_index[colInd]].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation_host(a->h_columns_float[a->type_index[colInd]].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
update_permutation_char_host(a->h_columns_char[a->type_index[colInd]], permutation, a->mRecCount, exe_value.top(), b->h_columns_char[b->type_index[colInd]], a->char_size[a->type_index[colInd]]);
};
};
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if ((a->type)[i] == 0) {
apply_permutation_host(a->h_columns_int[a->type_index[i]].data(), permutation, a->mRecCount, b->h_columns_int[b->type_index[i]].data());
}
else if ((a->type)[i] == 1)
apply_permutation_host(a->h_columns_float[a->type_index[i]].data(), permutation, a->mRecCount, b->h_columns_float[b->type_index[i]].data());
else {
apply_permutation_char_host(a->h_columns_char[a->type_index[i]], permutation, a->mRecCount, b->h_columns_char[b->type_index[i]], a->char_size[a->type_index[i]]);
};
};
delete [] temp;
delete [] permutation;
}
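// emit_order: implements ORDER BY. It estimates the record size and, when the set would take
// more than about half of the free GPU memory, falls back to order_on_host; otherwise it
// builds a device-side permutation (update_permutation per sort key), applies it to every
// column and copies the ordered columns back into the host-resident copy b.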
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
cout << "order: " << s << " " << f << endl;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
tp.pop();
};
queue<string> names;
for ( map<string,int>::iterator it=a->columnNames.begin() ; it != a->columnNames.end(); ++it )
names.push((*it).first);
CudaSet *b = a->copyDeviceStruct();
//lets find out if our data set fits into a GPU
size_t mem_available = getFreeMem();
size_t rec_size = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0)
rec_size = rec_size + int_size;
else if(a->type[i] == 1)
rec_size = rec_size + float_size;
else
rec_size = rec_size + a->char_size[a->type_index[i]];
};
bool fits;
if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU
fits = 0;
else fits = 1;
if(!fits) {
order_on_host(a, b, names, exe_type, exe_value);
}
else {
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
unsigned int maxSize = a->mRecCount;
void* temp;
unsigned int max_c = max_char(a);
if(max_c > float_size)
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*max_c));
else
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
unsigned int rcount;
a->mRecCount = load_queue(names, a, 1, op_vx.front(), rcount);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
unsigned int str_count = 0;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
update_permutation_char(a->d_columns_char[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp, a->char_size[a->type_index[colInd]]);
//update_permutation(a->d_columns_int[int_col_count+str_count], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
str_count++;
};
};
b->resize(a->mRecCount); //resize host arrays
b->mRecCount = a->mRecCount;
str_count = 0;
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, a->mRecCount, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, a->mRecCount, (float_type*)temp);
else {
apply_permutation_char(a->d_columns_char[a->type_index[i]], raw_ptr, a->mRecCount, (char*)temp, a->char_size[a->type_index[i]]);
str_count++;
};
};
for(unsigned int i = 0; i < a->mColumnCount; i++) {
switch(a->type[i]) {
case 0 :
thrust::copy(a->d_columns_int[a->type_index[i]].begin(), a->d_columns_int[a->type_index[i]].begin() + a->mRecCount, b->h_columns_int[b->type_index[i]].begin());
break;
case 1 :
thrust::copy(a->d_columns_float[a->type_index[i]].begin(), a->d_columns_float[a->type_index[i]].begin() + a->mRecCount, b->h_columns_float[b->type_index[i]].begin());
break;
default :
cudaMemcpy(b->h_columns_char[b->type_index[i]], a->d_columns_char[a->type_index[i]], a->char_size[a->type_index[i]]*a->mRecCount, cudaMemcpyDeviceToHost);
}
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
cudaFree(temp);
};
varNames[s] = b;
b->segCount = 1;
b->not_compressed = 1;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
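// emit_select: implements SELECT, with GROUP BY handling when ll != 0. Work proceeds segment
// by segment: string key columns are hashed into temporary int columns, rows are ordered and
// grouped in place when needed, select() evaluates the expressions into b, and per-segment
// results are either merged with add() (grouped, multi-segment case) or appended to c.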
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
//cout << "free mem " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet *b, *c;
curr_segment = 10000000;
//setSegments(a, op_vx);
//cout << "segs " << a->segCount << endl;
//exit(0);
allocColumns(a, op_vx);
unsigned int cycle_count;
if(!a->prm.empty())
cycle_count = varNames[setMap[op_value.front()]]->segCount;
else
cycle_count = a->segCount;
unsigned long long int ol_count = a->mRecCount;
unsigned int cnt;
//varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
a->oldRecCount = a->mRecCount;
b = new CudaSet(0, col_count);
bool b_set = 0, c_set = 0;
unsigned int long long tmp_size = a->mRecCount;
if(a->segCount > 1)
tmp_size = a->maxRecs;
boost::unordered_map<long long int, unsigned int> mymap; //this is where we keep the hashes of the records
vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_tmp;
for(unsigned int i = 0; i < distinct_cnt; i++) {
distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size));
distinct_val.push_back(thrust::device_vector<int_type>());
distinct_hash.push_back(thrust::device_vector<int_type>());
};
// find out how many string columns we have. Add int_type columns to store string hashes for sort/groupby ops.
stack<string> op_s = op_v2;
int_col_count = a->d_columns_int.size();
while(!op_s.empty()) {
int colInd = (a->columnNames).find(op_s.top())->second;
if (a->type[colInd] == 2) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
};
op_s.pop();
};
unsigned int s_cnt;
bool one_liner;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
reset_offsets();
op_s = op_v2;
s_cnt = 0;
while(!op_s.empty()) {
int colInd = (a->columnNames).find(op_s.top())->second;
if (a->type[colInd] == 2) {
a->d_columns_int[int_col_count + s_cnt].resize(0);
a->add_hashed_strings(op_s.top(), i, int_col_count + s_cnt);
s_cnt++;
};
op_s.pop();
};
cnt = 0;
copyColumns(a, op_vx, i, cnt);
if(a->mRecCount) {
if (ll != 0) {
order_inplace(a,op_v2,field_names,i);
a->GroupBy(op_v2, int_col_count);
};
for(unsigned int z = int_col_count; z < a->d_columns_int.size()-1; z++)
a->d_columns_int[z].resize(0);
select(op_type,op_value,op_nums, op_nums_f,a,b, distinct_tmp, one_liner);
if(!b_set) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
b_set = 1;
unsigned int old_cnt = b->mRecCount;
b->mRecCount = 0;
b->resize(varNames[setMap[op_vx.front()]]->maxRecs);
b->mRecCount = old_cnt;
};
if (!c_set) {
c = new CudaSet(0, col_count);
create_c(c,b);
c_set = 1;
};
if (ll != 0 && cycle_count > 1 ) {
add(c,b,op_v3, mymap, aliases, distinct_tmp, distinct_val, distinct_hash, a);
}
else {
//copy b to c
unsigned int c_offset = c->mRecCount;
c->resize(b->mRecCount);
for(unsigned int j=0; j < b->mColumnCount; j++) {
if (b->type[j] == 0) {
thrust::copy(b->d_columns_int[b->type_index[j]].begin(), b->d_columns_int[b->type_index[j]].begin() + b->mRecCount, c->h_columns_int[c->type_index[j]].begin() + c_offset);
}
else if (b->type[j] == 1) {
thrust::copy(b->d_columns_float[b->type_index[j]].begin(), b->d_columns_float[b->type_index[j]].begin() + b->mRecCount, c->h_columns_float[c->type_index[j]].begin() + c_offset);
}
else {
cudaMemcpy((void*)(thrust::raw_pointer_cast(c->h_columns_char[c->type_index[j]] + b->char_size[b->type_index[j]]*c_offset)), (void*)thrust::raw_pointer_cast(b->d_columns_char[b->type_index[j]]),
b->char_size[b->type_index[j]] * b->mRecCount, cudaMemcpyDeviceToHost);
};
};
};
};
};
a->mRecCount = ol_count;
a->mRecCount = a->oldRecCount;
a->deAllocOnDevice();
b->deAllocOnDevice();
if (ll != 0) {
count_avg(c, mymap, distinct_hash);
}
else {
if(one_liner) {
count_simple(c);
};
};
reset_offsets();
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
varNames[s] = c;
b->free();
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
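// emit_filter: implements WHERE. Each segment is first checked against the zone maps; only
// segments flagged 'R' are copied to the device and filtered row by row, the rest are
// resolved from the zone map result alone via setPrm.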
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << " " << getFreeMem() << endl;
b = a->copyDeviceStruct();
b->name = s;
unsigned int cycle_count = 1, cnt = 0;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(a->segCount != 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
oldCount = a->mRecCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
for(unsigned int i = 0; i < cycle_count; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
cout << "MAP CHECK " << map_check << endl;
reset_offsets();
if(map_check == 'R') {
copyColumns(a, op_value, i, cnt);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
}
else {
setPrm(a,b,map_check,i);
}
};
a->mRecCount = oldCount;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
//a->free();
//varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
total_count = 0;
total_segments = 0;
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << " " << getFreeMem() << endl;
if(a->text_source)
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
cout << "Writing a file " << endl;
a->Store(f,"", limit, 1);
cout << "Finished writing a file " << endl;
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
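// emit_load_binary: binds a variable to an already stored binary table. The ".header" file of
// the referenced column holds the total record count (8 bytes) followed by the segment count
// and the maximum records per segment (4 bytes each); the column data itself is read later,
// segment by segment, by the operations that use the set.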
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
CudaSet *a;
unsigned int segCount, maxRecs;
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
strcat(f1,".header");
FILE* ff = fopen(f1, "rb");
if(ff == NULL) {
cout << "Couldn't open file " << f1 << endl;
exit(0);
};
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
cout << "Reading " << totalRecs << " records" << endl;
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
}
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->not_compressed = 1;
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = 0;
distinct_cnt = 0;
reset_offsets();
}
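// main: the script is parsed twice. The first yyparse() pass runs with scan_state == 0 and
// only records, per variable, the number of the last statement that references it (the stat
// map), so later code knows when a set can be freed; the second pass (scan_state == 1)
// re-reads the script and actually executes the statements.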
int main(int ac, char **av)
{
extern FILE *yyin;
//cudaDeviceProp deviceProp;
//cudaGetDeviceProperties(&deviceProp, 0);
//if (!deviceProp.canMapHostMemory)
// cout << "Device 0 cannot map host memory" << endl;
//cudaSetDeviceFlags(cudaDeviceMapHost);
cudppCreate(&theCudpp);
long long int r30 = (long long int)RAND_MAX*rand()+rand(); //widen before multiplying to avoid int overflow
long long int s30 = (long long int)RAND_MAX*rand()+rand();
long long int t4 = rand() & 0xf;
hash_seed = (r30 << 34) + (s30 << 4) + t4;
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
if(alloced_sz)
cudaFree(alloced_tmp);
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
|
04bc3c8d167933a5208158c99a69e1f6ef050242.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#define BLOCK_NUM 256
#define THREAD_NUM 256
void histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int i;
for ( i = 0; i < nbr_bin; i ++){
hist_out[i] = 0; //zero the histogram: one bin per gray level
}
for ( i = 0; i < img_size; i ++){
hist_out[img_in[i]] ++; //fill array with counts of pixels of that color in image
}
}
void histogram_equalization(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin){
int *lut = (int *)malloc(sizeof(int)*nbr_bin);
int i, cdf, min, d;
/* Construct the LUT by calculating the CDF */
cdf = 0;
min = 0;
i = 0;
while(min == 0){
min = hist_in[i++]; //find the number of darkest pixels in the image
}
d = img_size - min;
for(i = 0; i < nbr_bin; i ++){
cdf += hist_in[i];
//lut[i] = (cdf - min)*(nbr_bin - 1)/d;
lut[i] = (int)(((float)cdf - min)*255/d + 0.5);
if(lut[i] < 0){
lut[i] = 0;
}
}
/* Get the result image */
for(i = 0; i < img_size; i ++){
if(lut[img_in[i]] > 255){
img_out[i] = 255;
}
else{
img_out[i] = (unsigned char)lut[img_in[i]];
}
}
}
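// histogram_gpu: one thread per pixel; each thread reads its pixel value and atomically
// increments the matching bin. Contention on popular bins is the main cost here; a common
// refinement would be a per-block shared-memory histogram that is merged into the global
// histogram once per block.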
__global__ void histogram_gpu(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= img_size)
{
return;
}
unsigned char value = img_in[id];
int bin = value% nbr_bin;
atomicAdd(&hist_out[bin],1);
}
void getHist(int * hist_out, unsigned char* img_in, int img_size, int nbr_bin){
unsigned char * dArray;
hipMalloc(&dArray, img_size);
hipMemcpy(dArray, img_in, img_size,hipMemcpyHostToDevice);
int * dHist;
hipMalloc(&dHist, nbr_bin * sizeof(int));
hipMemset(dHist,0,nbr_bin * sizeof(int));
dim3 block(32);
dim3 grid((img_size + block.x - 1)/block.x);
hipLaunchKernelGGL(( histogram_gpu), dim3(grid),dim3(block), 0, 0, dHist,dArray,img_size,nbr_bin);
hipMemcpy(hist_out,dHist, nbr_bin * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dArray);
hipFree(dHist);
}
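// The block below is a disabled attempt at a GPU histogram-equalization path: the LUT would
// still be built on the host and only the per-pixel remapping would be offloaded. It is kept
// for reference and is entirely commented out.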
/*__global__ void histogram_image_compile_gpu(unsigned char * img_out, unsigned char * img_in,
int * lut, int image_size, int nbr_bin) {
__shared__ unsigned int memlut[255];
for(int i = 0; i < 255; i++){
memlut[i] = lut[i]; //don't know if pointer is correct but I want a local copy of lut
}
int chunk_size = blockIdx.x; //need code here, we need to split the image array into local parts to run high performance calcs on
int offset = image_size/blockIdx.x; //when getting a chunk of the in image, or writing a chunk to the out image, offset+i should map to the correct location
__shared__ unsigned int local_img[9999]; //create a local version of a segment of the image to work against so the whole image isn't stored in gpu memory per core
for(int i = 0; i < chunk_size; i ++) {
local_img[i] = img_in[offset+i];
}
__syncthreads();
for(int i = 0; i < chunk_size; i++) {
local_img[i] = lut[local_img[i]];
}
__syncthreads();
for(int i = 0; i < chunk_size; i++) {
img_out[offset+i] = local_img[i];
}
}
__host__ static void histogram_equalization_gpu(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin){
/* Calculating the lut doesn't really make sense as a massively parallel thing, as it's only going through a maximum of 255 steps
so lets only cudaize the result image formation step */
/*unsigned int lut[nbr_bin]; //look up table, same size as hist
int i, cdf, min, d;
/* Construct the LUT by calculating the CDF */
/* cdf = 0;
min = 0;
i = 0;
while(min == 0){
min = hist_in[i++]; //find the number of darkest pixels in the image
}
d = img_size - min;
for(i = 0; i < nbr_bin; i ++){
cdf += hist_in[i];
//lut[i] = (cdf - min)*(nbr_bin - 1)/d;
lut[i] = (int)(((float)cdf - min)*255/d + 0.5);
if(lut[i] < 0){
lut[i] = 0;
}
}
unsigned char * dArray;
hipMalloc(&dArray, img_size);
hipMemcpy(dArray, img_in, img_size,hipMemcpyHostToDevice);
int * dArrayOut;
hipMalloc(&dArrayOut, img_size);
hipMemset(dArrayOut,0, img_size);
int * dLUT;
hipMalloc(&dLUT, nbr_bin * sizeof(int));
hipMemcpy(dLUT, lut, nbr_bin * sizeof(int), hipMemcpyHostToDevice);
dim3 block(32);
dim3 grid((img_size + block.x - 1)/block.x);
histogram_image_compile_gpu<<<grid,block>>>(dArrayOut,dArray,dLUT,img_size,nbr_bin);
hipMemcpy(img_out,dArrayOut, img_size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dArray);
hipFree(dArrayOut);
hipFree(dLUT);
/* Get the result image*/
//histogram_image_compile_gpu(img_out, img_in, lut, img_size, nbr_bin);
//}
| 04bc3c8d167933a5208158c99a69e1f6ef050242.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#define BLOCK_NUM 256
#define THREAD_NUM 256
void histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int i;
for ( i = 0; i < nbr_bin; i ++){
hist_out[i] = 0; //zero the histogram: one bin per gray level
}
for ( i = 0; i < img_size; i ++){
hist_out[img_in[i]] ++; //fill array with counts of pixels of that color in image
}
}
void histogram_equalization(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin){
int *lut = (int *)malloc(sizeof(int)*nbr_bin);
int i, cdf, min, d;
/* Construct the LUT by calculating the CDF */
cdf = 0;
min = 0;
i = 0;
while(min == 0){
min = hist_in[i++]; //find the number of darkest pixels in the image
}
d = img_size - min;
for(i = 0; i < nbr_bin; i ++){
cdf += hist_in[i];
//lut[i] = (cdf - min)*(nbr_bin - 1)/d;
lut[i] = (int)(((float)cdf - min)*255/d + 0.5);
if(lut[i] < 0){
lut[i] = 0;
}
}
/* Get the result image */
for(i = 0; i < img_size; i ++){
if(lut[img_in[i]] > 255){
img_out[i] = 255;
}
else{
img_out[i] = (unsigned char)lut[img_in[i]];
}
}
}
__global__ void histogram_gpu(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= img_size)
{
return;
}
unsigned char value = img_in[id];
int bin = value% nbr_bin;
atomicAdd(&hist_out[bin],1);
}
void getHist(int * hist_out, unsigned char* img_in, int img_size, int nbr_bin){
unsigned char * dArray;
cudaMalloc(&dArray, img_size);
cudaMemcpy(dArray, img_in, img_size,cudaMemcpyHostToDevice);
int * dHist;
cudaMalloc(&dHist, nbr_bin * sizeof(int));
cudaMemset(dHist,0,nbr_bin * sizeof(int));
dim3 block(32);
dim3 grid((img_size + block.x - 1)/block.x);
histogram_gpu<<<grid,block>>>(dHist,dArray,img_size,nbr_bin);
cudaMemcpy(hist_out,dHist, nbr_bin * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dArray);
cudaFree(dHist);
}
/*__global__ void histogram_image_compile_gpu(unsigned char * img_out, unsigned char * img_in,
int * lut, int image_size, int nbr_bin) {
__shared__ unsigned int memlut[255];
for(int i = 0; i < 255; i++){
memlut[i] = lut[i]; //don't know if pointer is correct but I want a local copy of lut
}
int chunk_size = blockIdx.x; //need code here, we need to split the image array into local parts to run high performance calcs on
int offset = image_size/blockIdx.x; //when getting a chunk of the in image, or writing a chunk to the out image, offset+i should map to the correct location
__shared__ unsigned int local_img[9999]; //create a local version of a segment of the image to work against so the whole image isn't stored in gpu memory per core
for(int i = 0; i < chunk_size; i ++) {
local_img[i] = img_in[offset+i];
}
__syncthreads();
for(int i = 0; i < chunk_size; i++) {
local_img[i] = lut[local_img[i]];
}
__syncthreads();
for(int i = 0; i < chunk_size; i++) {
img_out[offset+i] = local_img[i];
}
}
__host__ static void histogram_equalization_gpu(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin){
/* Calculating the lut doesn't really make sense as a massively parallel thing, as it's only going through a maximum of 255 steps
so lets only cudaize the result image formation step */
/*unsigned int lut[nbr_bin]; //look up table, same size as hist
int i, cdf, min, d;
/* Construct the LUT by calculating the CDF */
/* cdf = 0;
min = 0;
i = 0;
while(min == 0){
min = hist_in[i++]; //find the number of darkest pixels in the image
}
d = img_size - min;
for(i = 0; i < nbr_bin; i ++){
cdf += hist_in[i];
//lut[i] = (cdf - min)*(nbr_bin - 1)/d;
lut[i] = (int)(((float)cdf - min)*255/d + 0.5);
if(lut[i] < 0){
lut[i] = 0;
}
}
unsigned char * dArray;
cudaMalloc(&dArray, img_size);
cudaMemcpy(dArray, img_in, img_size,cudaMemcpyHostToDevice);
int * dArrayOut;
cudaMalloc(&dArrayOut, img_size);
cudaMemset(dArrayOut,0, img_size);
int * dLUT;
cudaMalloc(&dLUT, nbr_bin * sizeof(int));
cudaMemcpy(dLUT, lut, nbr_bin * sizeof(int), cudaMemcpyHostToDevice);
dim3 block(32);
dim3 grid((img_size + block.x - 1)/block.x);
histogram_image_compile_gpu<<<grid,block>>>(dArrayOut,dArray,dLUT,img_size,nbr_bin);
cudaMemcpy(img_out,dArrayOut, img_size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dArray);
cudaFree(dArrayOut);
cudaFree(dLUT);
/* Get the result image*/
//histogram_image_compile_gpu(img_out, img_in, lut, img_size, nbr_bin);
//}
|
8d960deff8cbd09705c072806416edfdf6ab7246.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <torch/types.h>
__device__ __forceinline__ float sum_reduce(float acc, float x) {
return acc + x;
}
__device__ __forceinline__ float sum_init() {
return 0;
}
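// topoCacheCoarsenSPMMKernel: SpMM C = A * B where A is CSR with all values treated as 1,
// used for wide dense matrices (k >= 64 in the dispatcher below). Each warp handles one
// sparse row, staging 32 column indices in shared memory per iteration, and every thread
// accumulates two output columns (cid and cid+32); the last block column handles the ragged
// tail through the nout checks.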
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
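// spmm_cuda_no_edge_value: host-side dispatch for the unweighted SpMM. The kernel is picked
// by the dense width k: k < 32 uses the simple kernel (k threads per row, 128/k rows per
// block), k < 64 the shared-memory caching kernel, and anything wider the coarsened kernel
// that writes two output columns per thread.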
torch::Tensor spmm_cuda_no_edge_value(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
hipLaunchKernelGGL(( topoSimpleSPMMKernel), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
hipLaunchKernelGGL(( topoCacheSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,4,1)), 128*sizeof(int), 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
hipLaunchKernelGGL(( topoCacheCoarsenSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,8,1)), 8*32*sizeof(int), 0,
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
}
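// spmm_test0/1/2: the same three strategies for the weighted case where A carries csrVal:
// test0 is the naive version, test1 stages 32 (column index, value) pairs per warp in shared
// memory, and test2 additionally coarsens each thread over two output columns.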
__global__ void spmm_test0(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int offset = 0;
float acc=0;
if (blockIdx.y!=gridDim.y-1){
for (int ptr = lb; ptr<hb; ptr++) {
offset = A_csrColInd[ptr]*B_ncols+cid;
acc += A_csrVal[ptr]*B_dnVal[offset];
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int ptr = lb; ptr<hb; ptr++) {
if (cid<B_ncols) {
offset = A_csrColInd[ptr]*B_ncols+cid;}
acc += A_csrVal[ptr]*B_dnVal[offset];
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;}
}
}
}
__global__ void spmm_test1(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc=0;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
__syncwarp();
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (cid<B_ncols) {
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
}
__syncwarp();
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;
}
}
}
}
__global__ void spmm_test2(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc1=0, acc2=0, val;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
val = val_sh[(shmem_offset+kk)];
acc1 += val*B_dnVal[offset];
acc2 += val*B_dnVal[offset+32];
}
__syncwarp();
}
offset = rid*B_ncols+cid;
C_dnVal[offset] = acc1;
C_dnVal[offset+32] = acc2;
}
else {
int nout = (B_ncols-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
val = val_sh[(shmem_offset+kk)];
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (nout>0) {
acc1 += val*B_dnVal[offset];
}
if (nout>1) {
acc2 += val*B_dnVal[offset+32];
}
}
__syncwarp();
}
offset = rid*B_ncols+cid;
if (nout>0) {
C_dnVal[offset] = acc1;
}
if (nout>1) {
C_dnVal[(offset+32)] = acc2;
}
}
}
}
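// spmm_cuda: dispatch for the weighted SpMM, mirroring spmm_cuda_no_edge_value above.
// Minimal usage sketch from the C++ side (assuming int32 CSR index tensors and a float32
// dense matrix on the same CUDA device):
//   torch::Tensor out = spmm_cuda(rowptr, colind, values, dense); // out has shape (nrows, k)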
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor values,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
hipLaunchKernelGGL(( spmm_test0), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0,
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+4-1)/4;
hipLaunchKernelGGL(( spmm_test1), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 4, 1)), 32*4*(sizeof(int)+sizeof(float)), 0,
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
hipLaunchKernelGGL(( spmm_test2), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 8, 1)), 32*8*(sizeof(int)+sizeof(float)), 0,
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
}
| 8d960deff8cbd09705c072806416edfdf6ab7246.cu | #include <cuda.h>
#include <torch/types.h>
__device__ __forceinline__ float sum_reduce(float acc, float x) {
return acc + x;
}
__device__ __forceinline__ float sum_init() {
return 0;
}
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
torch::Tensor spmm_cuda_no_edge_value(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
topoSimpleSPMMKernel<<< dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
topoCacheSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,4,1), 128*sizeof(int)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
topoCacheCoarsenSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,8,1), 8*32*sizeof(int)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), dense.data<float>(), out.data<float>());
return out;
}
}
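// Weighted SpMM kernels (A_csrVal holds edge values). spmm_test0 is the naive variant
// for k < 32: one thread per output element, with values and column indices read from
// global memory.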
__global__ void spmm_test0(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int offset = 0;
float acc=0;
if (blockIdx.y!=gridDim.y-1){
for (int ptr = lb; ptr<hb; ptr++) {
offset = A_csrColInd[ptr]*B_ncols+cid;
acc += A_csrVal[ptr]*B_dnVal[offset];
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int ptr = lb; ptr<hb; ptr++) {
if (cid<B_ncols) {
offset = A_csrColInd[ptr]*B_ncols+cid;}
acc += A_csrVal[ptr]*B_dnVal[offset];
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;}
}
}
}
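// spmm_test1, for 32 <= k < 64: each warp caches 32 (value, column offset) pairs of its
// row in shared memory, then all 32 lanes reuse them to accumulate one output column each.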
__global__ void spmm_test1(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc=0;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
__syncwarp();
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (cid<B_ncols) {
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
}
__syncwarp();
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;
}
}
}
}
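// spmm_test2, for k >= 64: same shared-memory staging as spmm_test1, but each thread
// accumulates two output columns 32 apart (acc1/acc2) to reuse the cached row data.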
__global__ void spmm_test2(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc1=0, acc2=0, val;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
val = val_sh[(shmem_offset+kk)];
acc1 += val*B_dnVal[offset];
acc2 += val*B_dnVal[offset+32];
}
__syncwarp();
}
offset = rid*B_ncols+cid;
C_dnVal[offset] = acc1;
C_dnVal[offset+32] = acc2;
}
else {
int nout = (B_ncols-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
val = val_sh[(shmem_offset+kk)];
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (nout>0) {
acc1 += val*B_dnVal[offset];
}
if (nout>1) {
acc2 += val*B_dnVal[offset+32];
}
}
__syncwarp();
}
offset = rid*B_ncols+cid;
if (nout>0) {
C_dnVal[offset] = acc1;
}
if (nout>1) {
C_dnVal[(offset+32)] = acc2;
}
}
}
}
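// Host entry point for weighted SpMM: same k-based kernel selection as
// spmm_cuda_no_edge_value, with shared memory sized for both column offsets and values.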
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor values,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
spmm_test0<<<dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>(
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+4-1)/4;
spmm_test1<<<dim3(n_block, tile_k, 1), dim3(32, 4, 1), 32*4*(sizeof(int)+sizeof(float))>>> (
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
spmm_test2<<<dim3(n_block, tile_k, 1), dim3(32, 8, 1), 32*8*(sizeof(int)+sizeof(float))>>> (
m, k, rowptr.data<int>(), colind.data<int>(), values.data<float>(), dense.data<float>(), out.data<float>());
return out;
}
}
|
c1ed794f6794d104841d3534b147dca288bff0d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LAYER_NORM_LAYER_INSTANTIATE
#include "lbann/comm_impl.hpp"
#include "lbann/layers/regularizers/layer_norm.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#include <thrust/pair.h>
namespace lbann {
namespace {
/** Functor for adding @c thrust::pair objects. */
template <typename Pair>
struct pair_sum {
__device__ __forceinline__
Pair operator()(const Pair& x, const Pair& y) {
return Pair(x.first+y.first, x.second+y.second);
}
};
/** Accumulate sums and sums of squares for each data sample.
*
* On input, sums and sqsums are filled with zeros.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1
*/
template <size_t bdimx, typename TensorDataType>
__global__ void fp_sums_kernel(
size_t local_num_samples,
size_t local_sample_size,
const TensorDataType* __restrict__ vals,
size_t vals_ldim,
TensorDataType* sums,
size_t sums_stride,
TensorDataType* sqsums,
size_t sqsums_stride) {
// Indices and dimensions
constexpr size_t bdimy = 1;
constexpr size_t bdimz = 1;
const size_t tid = threadIdx.x + blockDim.x * threadIdx.y;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
// Accumulate sums and perform block-wide reduction
using pair_t = thrust::pair<TensorDataType,TensorDataType>;
using pair_sum_t = pair_sum<pair_t>;
pair_t sum_sqsum(0,0);
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = vals[i*vals_ldim + j];
sum_sqsum.first += x;
sum_sqsum.second += x * x;
}
sum_sqsum = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sum_sqsum);
// Output result to global memory
if (tid == 0) {
gpu_lib::atomic_add(&sums[i*sums_stride], sum_sqsum.first);
gpu_lib::atomic_add(&sqsums[i*sqsums_stride], sum_sqsum.second);
}
}
}
/** Compute per-sample statistics.
*
* mean = sum(x_i) / n
*
* var = ( sum(x_i^2)/n - mean^2 )
*
* On input, means contains per-sample sums and vars contains
* per-sample sums of squares.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_num_samples / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void fp_statistics_kernel(
unsigned long long sample_size,
size_t local_num_samples,
TensorDataType* means,
size_t means_stride,
TensorDataType* vars,
size_t vars_stride) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i = gid; i < local_num_samples; i += nthreads) {
const auto sum = means[i*means_stride];
const auto sqsum = vars[i*vars_stride];
const TensorDataType sample_size_dt = TensorDataType(sample_size);
const auto& mean = sum / sample_size_dt;
const auto& sqmean = sqsum / sample_size_dt;
const auto& var = (sqmean - mean*mean);
means[i*means_stride] = mean;
vars[i*vars_stride] = gpu_lib::max(var, TensorDataType(0.0));
}
}
/** Compute outputs.
*
* y_i = (x_i - mean) / sqrt(var + epsilon)
*
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* means,
size_t means_stride,
const TensorDataType* vars,
size_t vars_stride) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
const auto& mean = means[i*means_stride];
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
auto& y = output[i*output_ldim + j];
y = (x - mean) * inv_stdev;
}
}
}
/** @brief Forward prop */
template <typename TensorDataType>
void fp_impl(lbann_comm& comm,
TensorDataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& input,
El::AbstractDistMatrix<TensorDataType>& output,
El::AbstractDistMatrix<TensorDataType>& statistics) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Workspace buffer
statistics.Empty(false);
statistics.AlignWith(input);
statistics.Resize(2, input.Width());
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix());
auto& local_output = dynamic_cast<GPUMatType&>(output.Matrix());
auto& local_statistics = dynamic_cast<GPUMatType&>(statistics.Matrix());
auto local_means = El::View(local_statistics, El::IR(0), El::ALL);
auto local_vars = El::View(local_statistics, El::IR(1), El::ALL);
// Dimensions
const size_t sample_size = input.Height();
const size_t local_num_samples = local_input.Width();
const size_t local_sample_size = local_input.Height();
// Trivial cases
if (local_num_samples < 1) { return; }
// Compute sums
El::Zero(statistics);
if (!local_input.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
fp_sums_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size,
local_input.LockedBuffer(), local_input.LDim(),
local_means.Buffer(), local_means.LDim(),
local_vars.Buffer(), local_vars.LDim());
}
comm.allreduce(statistics, statistics.RedundantComm(), El::mpi::SUM);
// Compute statistics from sums
if (sample_size <= 1) {
// local_means already has correct values
El::Fill(local_vars, El::TypeTraits<TensorDataType>::One());
}
else if (!local_statistics.IsEmpty()) {
auto sync_info = gpu::get_sync_info(local_statistics);
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_num_samples + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
fp_statistics_kernel<TensorDataType>,
grid_dims, block_dims, 0, sync_info,
sample_size, local_num_samples,
local_means.Buffer(), local_means.LDim(),
local_vars.Buffer(), local_vars.LDim());
}
// Apply layer norm
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
fp_output_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim());
}
}
/** Compute gradients w.r.t. per-sample statistics.
*
* dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon)
*
* dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2
*
* On input, means_grad and vars_grad are filled with zeros.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1
*/
template <size_t bdimx, typename TensorDataType>
__global__ void bp_statistics_grad_kernel(
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ output_grad,
size_t output_grad_ldim,
const TensorDataType* means,
size_t means_stride,
const TensorDataType* vars,
size_t vars_stride,
TensorDataType* means_grad,
size_t means_grad_stride,
TensorDataType* vars_grad,
size_t vars_grad_stride) {
// Indices and dimensions
constexpr size_t bdimy = 1;
constexpr size_t bdimz = 1;
const size_t tid = threadIdx.x + blockDim.x * threadIdx.y;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
// Accumulate sums and perform block-wide reduction
using pair_t = thrust::pair<TensorDataType,TensorDataType>;
using pair_sum_t = pair_sum<pair_t>;
pair_t sums(0,0);
const auto& mean = means[i*means_stride];
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
const auto& dy = output_grad[i*output_grad_ldim + j];
sums.first += dy;
sums.second += dy * (x - mean);
}
sums = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sums);
// Output result to global memory
if (tid == 0) {
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
const TensorDataType dmean = -sums.first * inv_stdev;
const TensorDataType dvar = -sums.second * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2);
gpu_lib::atomic_add(&means_grad[i*means_grad_stride], dmean);
gpu_lib::atomic_add(&vars_grad[i*vars_grad_stride], dvar);
}
}
}
/** Compute gradients w.r.t. input.
*
* dL/dx_i = ( dL/dy_i / sqrt(var+epsilon)
* + dL/dmean / n
*              + dL/dvar * (x_i - mean) * 2/n )
*
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1
*/
template <typename TensorDataType>
__global__ void bp_input_grad_kernel(
unsigned long long sample_size,
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ output_grad,
size_t output_grad_ldim,
TensorDataType* __restrict__ input_grad,
size_t input_grad_ldim,
const TensorDataType* __restrict__ means,
size_t means_stride,
const TensorDataType* __restrict__ vars,
size_t vars_stride,
const TensorDataType* means_grad,
size_t means_grad_stride,
const TensorDataType* vars_grad,
size_t vars_grad_stride) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
const auto& mean = means[i*means_stride];
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
const auto& dmean = means_grad[i*means_grad_stride];
const auto& dvar = vars_grad[i*vars_grad_stride];
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
const auto& dy = output_grad[i*output_grad_ldim + j];
auto& dx = input_grad[i*input_grad_ldim + j];
dx = (dy * inv_stdev
+ dmean / TensorDataType(sample_size)
+ dvar * (x - mean) * TensorDataType(2) / TensorDataType(sample_size));
}
}
}
/** @brief Backprop */
template <typename TensorDataType>
void bp_impl(lbann_comm& comm,
TensorDataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& input,
const El::AbstractDistMatrix<TensorDataType>& output_grad,
El::AbstractDistMatrix<TensorDataType>& input_grad,
const El::AbstractDistMatrix<TensorDataType>& statistics,
El::AbstractDistMatrix<TensorDataType>& statistics_grad) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Workspace buffer
statistics_grad.Empty(false);
statistics_grad.AlignWith(input);
statistics_grad.Resize(2, input.Width());
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix());
const auto& local_output_grad = dynamic_cast<const GPUMatType&>(output_grad.LockedMatrix());
auto& local_input_grad = dynamic_cast<GPUMatType&>(input_grad.Matrix());
const auto& local_statistics = dynamic_cast<const GPUMatType&>(statistics.LockedMatrix());
const auto local_means = El::LockedView(local_statistics, El::IR(0), El::ALL);
const auto local_vars = El::LockedView(local_statistics, El::IR(1), El::ALL);
auto& local_statistics_grad = dynamic_cast<GPUMatType&>(statistics_grad.Matrix());
auto local_means_grad = El::View(local_statistics_grad, El::IR(0), El::ALL);
auto local_vars_grad = El::View(local_statistics_grad, El::IR(1), El::ALL);
// Dimensions
const size_t sample_size = input.Height();
const size_t local_num_samples = local_input.Width();
const size_t local_sample_size = local_input.Height();
// Trivial case if sample size <= 1
// Note: Output is constant, so error signal is zero.
if (sample_size <= 1) {
El::Zero(input_grad);
return;
}
// Compute gradient w.r.t. statistics
El::Zero(statistics_grad);
if (!local_output_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
bp_statistics_grad_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output_grad.LockedBuffer(), local_output_grad.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim(),
local_means_grad.Buffer(), local_means_grad.LDim(),
local_vars_grad.Buffer(), local_vars_grad.LDim());
}
comm.allreduce(statistics_grad,
statistics_grad.RedundantComm(),
El::mpi::SUM);
// Compute gradient w.r.t. input
if (!local_input_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
bp_input_grad_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
sample_size, local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output_grad.LockedBuffer(), local_output_grad.LDim(),
local_input_grad.Buffer(), local_input_grad.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim(),
local_means_grad.LockedBuffer(), local_means_grad.LDim(),
local_vars_grad.LockedBuffer(), local_vars_grad.LDim());
}
}
} // namespace <anon>
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void layer_norm_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_impl(*this->get_comm(),
this->m_epsilon,
this->get_prev_activations(),
this->get_activations(),
*this->m_statistics);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void layer_norm_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_impl(*this->get_comm(),
this->m_epsilon,
this->get_prev_activations(),
this->get_prev_error_signals(),
this->get_error_signals(),
*this->m_statistics,
*this->m_statistics_gradient);
}
#define PROTO(T) \
template class layer_norm_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class layer_norm_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| c1ed794f6794d104841d3534b147dca288bff0d6.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LAYER_NORM_LAYER_INSTANTIATE
#include "lbann/comm_impl.hpp"
#include "lbann/layers/regularizers/layer_norm.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#include <thrust/pair.h>
namespace lbann {
namespace {
/** Functor for adding @c thrust::pair objects. */
template <typename Pair>
struct pair_sum {
__device__ __forceinline__
Pair operator()(const Pair& x, const Pair& y) {
return Pair(x.first+y.first, x.second+y.second);
}
};
/** Accumulate sums and sums of squares for each data sample.
*
* On input, sums and sqsums are filled with zeros.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1
*/
template <size_t bdimx, typename TensorDataType>
__global__ void fp_sums_kernel(
size_t local_num_samples,
size_t local_sample_size,
const TensorDataType* __restrict__ vals,
size_t vals_ldim,
TensorDataType* sums,
size_t sums_stride,
TensorDataType* sqsums,
size_t sqsums_stride) {
// Indices and dimensions
constexpr size_t bdimy = 1;
constexpr size_t bdimz = 1;
const size_t tid = threadIdx.x + blockDim.x * threadIdx.y;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
// Accumulate sums and perform block-wide reduction
using pair_t = thrust::pair<TensorDataType,TensorDataType>;
using pair_sum_t = pair_sum<pair_t>;
pair_t sum_sqsum(0,0);
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = vals[i*vals_ldim + j];
sum_sqsum.first += x;
sum_sqsum.second += x * x;
}
sum_sqsum = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sum_sqsum);
// Output result to global memory
if (tid == 0) {
gpu_lib::atomic_add(&sums[i*sums_stride], sum_sqsum.first);
gpu_lib::atomic_add(&sqsums[i*sqsums_stride], sum_sqsum.second);
}
}
}
/** Compute per-sample statistics.
*
* mean = sum(x_i) / n
*
* var = ( sum(x_i^2)/n - mean^2 )
*
* On input, means contains per-sample sums and vars contains
* per-sample sums of squares.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_num_samples / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void fp_statistics_kernel(
unsigned long long sample_size,
size_t local_num_samples,
TensorDataType* means,
size_t means_stride,
TensorDataType* vars,
size_t vars_stride) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i = gid; i < local_num_samples; i += nthreads) {
const auto sum = means[i*means_stride];
const auto sqsum = vars[i*vars_stride];
const TensorDataType sample_size_dt = TensorDataType(sample_size);
const auto& mean = sum / sample_size_dt;
const auto& sqmean = sqsum / sample_size_dt;
const auto& var = (sqmean - mean*mean);
means[i*means_stride] = mean;
vars[i*vars_stride] = gpu_lib::max(var, TensorDataType(0.0));
}
}
/** Compute outputs.
*
* y_i = (x_i - mean) / sqrt(var + epsilon)
*
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* means,
size_t means_stride,
const TensorDataType* vars,
size_t vars_stride) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
const auto& mean = means[i*means_stride];
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
auto& y = output[i*output_ldim + j];
y = (x - mean) * inv_stdev;
}
}
}
/** @brief Forward prop */
template <typename TensorDataType>
void fp_impl(lbann_comm& comm,
TensorDataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& input,
El::AbstractDistMatrix<TensorDataType>& output,
El::AbstractDistMatrix<TensorDataType>& statistics) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Workspace buffer
statistics.Empty(false);
statistics.AlignWith(input);
statistics.Resize(2, input.Width());
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix());
auto& local_output = dynamic_cast<GPUMatType&>(output.Matrix());
auto& local_statistics = dynamic_cast<GPUMatType&>(statistics.Matrix());
auto local_means = El::View(local_statistics, El::IR(0), El::ALL);
auto local_vars = El::View(local_statistics, El::IR(1), El::ALL);
// Dimensions
const size_t sample_size = input.Height();
const size_t local_num_samples = local_input.Width();
const size_t local_sample_size = local_input.Height();
// Trivial cases
if (local_num_samples < 1) { return; }
// Compute sums
El::Zero(statistics);
if (!local_input.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
fp_sums_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size,
local_input.LockedBuffer(), local_input.LDim(),
local_means.Buffer(), local_means.LDim(),
local_vars.Buffer(), local_vars.LDim());
}
comm.allreduce(statistics, statistics.RedundantComm(), El::mpi::SUM);
// Compute statistics from sums
if (sample_size <= 1) {
// local_means already has correct values
El::Fill(local_vars, El::TypeTraits<TensorDataType>::One());
}
else if (!local_statistics.IsEmpty()) {
auto sync_info = gpu::get_sync_info(local_statistics);
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_num_samples + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
fp_statistics_kernel<TensorDataType>,
grid_dims, block_dims, 0, sync_info,
sample_size, local_num_samples,
local_means.Buffer(), local_means.LDim(),
local_vars.Buffer(), local_vars.LDim());
}
// Apply layer norm
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
fp_output_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim());
}
}
/** Compute gradients w.r.t. per-sample statistics.
*
* dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon)
*
* dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2
*
* On input, means_grad and vars_grad are filled with zeros.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1
*/
template <size_t bdimx, typename TensorDataType>
__global__ void bp_statistics_grad_kernel(
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ output_grad,
size_t output_grad_ldim,
const TensorDataType* means,
size_t means_stride,
const TensorDataType* vars,
size_t vars_stride,
TensorDataType* means_grad,
size_t means_grad_stride,
TensorDataType* vars_grad,
size_t vars_grad_stride) {
// Indices and dimensions
constexpr size_t bdimy = 1;
constexpr size_t bdimz = 1;
const size_t tid = threadIdx.x + blockDim.x * threadIdx.y;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
// Accumulate sums and perform block-wide reduction
using pair_t = thrust::pair<TensorDataType,TensorDataType>;
using pair_sum_t = pair_sum<pair_t>;
pair_t sums(0,0);
const auto& mean = means[i*means_stride];
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
const auto& dy = output_grad[i*output_grad_ldim + j];
sums.first += dy;
sums.second += dy * (x - mean);
}
sums = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sums);
// Output result to global memory
if (tid == 0) {
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
const TensorDataType dmean = -sums.first * inv_stdev;
const TensorDataType dvar = -sums.second * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2);
gpu_lib::atomic_add(&means_grad[i*means_grad_stride], dmean);
gpu_lib::atomic_add(&vars_grad[i*vars_grad_stride], dvar);
}
}
}
/** Compute gradients w.r.t. input.
*
* dL/dx_i = ( dL/dy_i / sqrt(var+epsilon)
* + dL/dmean / n
*              + dL/dvar * (x_i - mean) * 2/n )
*
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1
*/
template <typename TensorDataType>
__global__ void bp_input_grad_kernel(
unsigned long long sample_size,
size_t local_num_samples,
size_t local_sample_size,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ output_grad,
size_t output_grad_ldim,
TensorDataType* __restrict__ input_grad,
size_t input_grad_ldim,
const TensorDataType* __restrict__ means,
size_t means_stride,
const TensorDataType* __restrict__ vars,
size_t vars_stride,
const TensorDataType* means_grad,
size_t means_grad_stride,
const TensorDataType* vars_grad,
size_t vars_grad_stride) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t i = gidy; i < local_num_samples; i += nthreadsy) {
const auto& mean = means[i*means_stride];
const auto& var = vars[i*vars_stride];
const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon);
const auto& dmean = means_grad[i*means_grad_stride];
const auto& dvar = vars_grad[i*vars_grad_stride];
for (size_t j = gidx; j < local_sample_size; j += nthreadsx) {
const auto& x = input[i*input_ldim + j];
const auto& dy = output_grad[i*output_grad_ldim + j];
auto& dx = input_grad[i*input_grad_ldim + j];
dx = (dy * inv_stdev
+ dmean / TensorDataType(sample_size)
+ dvar * (x - mean) * TensorDataType(2) / TensorDataType(sample_size));
}
}
}
/** @brief Backprop */
template <typename TensorDataType>
void bp_impl(lbann_comm& comm,
TensorDataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& input,
const El::AbstractDistMatrix<TensorDataType>& output_grad,
El::AbstractDistMatrix<TensorDataType>& input_grad,
const El::AbstractDistMatrix<TensorDataType>& statistics,
El::AbstractDistMatrix<TensorDataType>& statistics_grad) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Workspace buffer
statistics_grad.Empty(false);
statistics_grad.AlignWith(input);
statistics_grad.Resize(2, input.Width());
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix());
const auto& local_output_grad = dynamic_cast<const GPUMatType&>(output_grad.LockedMatrix());
auto& local_input_grad = dynamic_cast<GPUMatType&>(input_grad.Matrix());
const auto& local_statistics = dynamic_cast<const GPUMatType&>(statistics.LockedMatrix());
const auto local_means = El::LockedView(local_statistics, El::IR(0), El::ALL);
const auto local_vars = El::LockedView(local_statistics, El::IR(1), El::ALL);
auto& local_statistics_grad = dynamic_cast<GPUMatType&>(statistics_grad.Matrix());
auto local_means_grad = El::View(local_statistics_grad, El::IR(0), El::ALL);
auto local_vars_grad = El::View(local_statistics_grad, El::IR(1), El::ALL);
// Dimensions
const size_t sample_size = input.Height();
const size_t local_num_samples = local_input.Width();
const size_t local_sample_size = local_input.Height();
// Trivial case if sample size <= 1
// Note: Output is constant, so error signal is zero.
if (sample_size <= 1) {
El::Zero(input_grad);
return;
}
// Compute gradient w.r.t. statistics
El::Zero(statistics_grad);
if (!local_output_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
bp_statistics_grad_kernel<block_size, TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output_grad.LockedBuffer(), local_output_grad.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim(),
local_means_grad.Buffer(), local_means_grad.LDim(),
local_vars_grad.Buffer(), local_vars_grad.LDim());
}
comm.allreduce(statistics_grad,
statistics_grad.RedundantComm(),
El::mpi::SUM);
// Compute gradient w.r.t. input
if (!local_input_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_sample_size + block_size - 1) / block_size;
grid_dims.y = local_num_samples;
hydrogen::gpu::LaunchKernel(
bp_input_grad_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
sample_size, local_num_samples, local_sample_size, epsilon,
local_input.LockedBuffer(), local_input.LDim(),
local_output_grad.LockedBuffer(), local_output_grad.LDim(),
local_input_grad.Buffer(), local_input_grad.LDim(),
local_means.LockedBuffer(), local_means.LDim(),
local_vars.LockedBuffer(), local_vars.LDim(),
local_means_grad.LockedBuffer(), local_means_grad.LDim(),
local_vars_grad.LockedBuffer(), local_vars_grad.LDim());
}
}
} // namespace <anon>
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void layer_norm_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_impl(*this->get_comm(),
this->m_epsilon,
this->get_prev_activations(),
this->get_activations(),
*this->m_statistics);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void layer_norm_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_impl(*this->get_comm(),
this->m_epsilon,
this->get_prev_activations(),
this->get_prev_error_signals(),
this->get_error_signals(),
*this->m_statistics,
*this->m_statistics_gradient);
}
#define PROTO(T) \
template class layer_norm_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class layer_norm_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
1db08edcd450bd2fc0e7d23bbe221a73c4d63454.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
#define MAX_PATH_LEN (32 * 1024)
#define MAX_KERNEL_RADIUS 16
// thread block size
#define TX 32
#define TY 32
struct kernel_params {
float kernel[MAX_KERNEL_RADIUS + 1];
int w;
int h;
};
static void error(const char * message) {
fprintf(stderr, "ERROR: %s\n", message);
exit(-1);
}
static void usage(const char * message, const char * app) {
fprintf(stderr, "Usage: %s width height sigma file1 ... fileN\n", app);
fprintf(stderr, "Example: %s 1920 1080 3 f1.gray f2.gray f3.gray\n", app);
error(message);
}
static double timer_ms() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000.0 + tv.tv_usec * 0.001;
}
static __device__ int saturate(int n, int max_value) {
return max(0, min(n, max_value - 1));
}
static __device__ int get_pix(const uint8_t * src, int w, int h, int x, int y) {
return (float)src[saturate(x, w) + saturate(y, h) * w];
}
static __global__ void convolution_vertical(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY + 2 * MAX_KERNEL_RADIUS][TX];
// all threads populate shared cache
for(int ny = 0; ny < 2; ny++) {
cache[threadIdx.y + ny * TY][threadIdx.x]
= get_pix(src, p.w, p.h, x, y - MAX_KERNEL_RADIUS + ny * TY);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[MAX_KERNEL_RADIUS + threadIdx.y][threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y - k][threadIdx.x];
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y + k][threadIdx.x];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static __global__ void convolution_horizontal(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY][TX + 2 * MAX_KERNEL_RADIUS];
// all threads populate shared cache
for(int nx = 0; nx < 2; nx++) {
cache[threadIdx.y][threadIdx.x + nx * TX]
= get_pix(src, p.w, p.h, x - MAX_KERNEL_RADIUS + nx * TX, y);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x + k];
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x - k];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static float gaussian(float sigma, float x) {
const float e = x / sigma;
return exp(-0.5 * e * e);
}
int main(int argn, char ** argv) {
kernel_params params;
if(argn < 4) {
usage("Wrong argument count", *argv);
}
// read width and height
params.w = atoi(argv[1]);
params.h = atoi(argv[2]);
if(params.w < 1 || params.h < 1) {
usage("Both width and height must be positive integers", *argv);
}
const int pix_count = params.w * params.h;
// read sigma and prepare normalized kernel (sum = 1)
const float sigma = atof(argv[3]);
float kernel_sum = 0.0f;
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
kernel_sum += params.kernel[k] = gaussian(sigma, k);
}
kernel_sum = 2.0 * kernel_sum - params.kernel[0];
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
params.kernel[k] /= kernel_sum;
}
// dump the kernel
printf("Convolution kernel:");
for(int k = -MAX_KERNEL_RADIUS; k <= MAX_KERNEL_RADIUS; k++) {
printf(" %f", params.kernel[k < 0 ? -k : k]);
}
printf("\n");
// prepare buffers
uint8_t * const data_ptr = (uint8_t*)malloc(pix_count);
uint8_t * data_gpu_ptr[2];
uint8_t * temp_gpu_ptr[2];
hipMalloc((void**)(data_gpu_ptr + 0), pix_count);
hipMalloc((void**)(temp_gpu_ptr + 0), pix_count);
hipMalloc((void**)(data_gpu_ptr + 1), pix_count);
hipMalloc((void**)(temp_gpu_ptr + 1), pix_count);
// two CUDA streams for asynchronous kernel and data transfers
hipStream_t streams[2];
hipStreamCreate(streams + 0);
hipStreamCreate(streams + 1);
// measure time of processing of all images
const double begin = timer_ms();
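// Software pipeline over the image list using the two streams/buffer pairs above:
// each image is uploaded in one iteration, convolved in the next, and written out
// in the one after that, so host<->device transfers overlap with kernel execution.
// The first and last iterations only fill and drain the pipeline.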
for(int i = 3; i <= argn; i++) {
// index of I/O buffers in this iteration
const int io_idx = i & 1;
// index of computing resources in this iteration
const int comp_idx = io_idx ^ 1;
// start processing the image loaded in the previous iteration
// (except in the first and last iterations)
if(i > 3 && i < argn) {
// launch vertical and horizontal pass
dim3 block(TX, TY);
dim3 grid((params.w + TX - 1) / TX, (params.h + TY - 1) / TY);
hipLaunchKernelGGL(( convolution_vertical), dim3(grid), dim3(block), 0, streams[comp_idx],
params, data_gpu_ptr[comp_idx], temp_gpu_ptr[comp_idx]);
hipLaunchKernelGGL(( convolution_horizontal), dim3(grid), dim3(block), 0, streams[comp_idx],
params, temp_gpu_ptr[comp_idx], data_gpu_ptr[comp_idx]);
}
// processing now runs asynchronously on the GPU => save results
// from the previous iteration (except in the first two iterations)
if(i > 4) {
// copy data back from GPU
hipMemcpyAsync(data_ptr, data_gpu_ptr[io_idx], pix_count,
hipMemcpyDeviceToHost, streams[io_idx]);
// compose output filename
char out_path[MAX_PATH_LEN + 1];
snprintf(out_path, MAX_PATH_LEN, "%s.out.gray", argv[i - 1]);
// wait for the data to actually appear in the buffer
hipStreamSynchronize(streams[io_idx]);
// write data to output file
FILE * const out_file = fopen(out_path, "wb");
if(NULL == out_file || 1 != fwrite(data_ptr, pix_count, 1, out_file)) {
error(out_path);
}
fclose(out_file);
}
// load input for the next iteration (except in the last two iterations)
if(i < (argn - 1)) {
// read input file
printf("Processing '%s'\n", argv[i + 1]);
FILE * const src_file = fopen(argv[i + 1], "rb");
if(NULL == src_file || 1 != fread(data_ptr, pix_count, 1, src_file)) {
error(argv[i + 1]);
}
fclose(src_file);
// copy data to GPU memory
hipMemcpyAsync(data_gpu_ptr[io_idx], data_ptr, pix_count,
hipMemcpyHostToDevice, streams[io_idx]);
// make sure that the buffer is ready for next iteration
hipStreamSynchronize(streams[io_idx]);
}
}
const double end = timer_ms();
// print total time
printf("time: %f ms, %d images => %f ms/image\n",
end - begin, argn - 4, (end - begin) / (argn - 4));
// cleanup
free(data_ptr);
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
hipFree(data_gpu_ptr[0]);
hipFree(temp_gpu_ptr[0]);
hipFree(data_gpu_ptr[1]);
hipFree(temp_gpu_ptr[1]);
return 0;
}
| 1db08edcd450bd2fc0e7d23bbe221a73c4d63454.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
#define MAX_PATH_LEN (32 * 1024)
#define MAX_KERNEL_RADIUS 16
// thread block size
#define TX 32
#define TY 32
struct kernel_params {
float kernel[MAX_KERNEL_RADIUS + 1];
int w;
int h;
};
static void error(const char * message) {
fprintf(stderr, "ERROR: %s\n", message);
exit(-1);
}
static void usage(const char * message, const char * app) {
fprintf(stderr, "Usage: %s width height sigma file1 ... fileN\n", app);
fprintf(stderr, "Example: %s 1920 1080 3 f1.gray f2.gray f3.gray\n", app);
error(message);
}
static double timer_ms() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000.0 + tv.tv_usec * 0.001;
}
static __device__ int saturate(int n, int max_value) {
return max(0, min(n, max_value - 1));
}
static __device__ int get_pix(const uint8_t * src, int w, int h, int x, int y) {
return (float)src[saturate(x, w) + saturate(y, h) * w];
}
static __global__ void convolution_vertical(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY + 2 * MAX_KERNEL_RADIUS][TX];
// all threads populate shared cache
for(int ny = 0; ny < 2; ny++) {
cache[threadIdx.y + ny * TY][threadIdx.x]
= get_pix(src, p.w, p.h, x, y - MAX_KERNEL_RADIUS + ny * TY);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[MAX_KERNEL_RADIUS + threadIdx.y][threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y - k][threadIdx.x];
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y + k][threadIdx.x];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static __global__ void convolution_horizontal(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY][TX + 2 * MAX_KERNEL_RADIUS];
// all threads populate shared cache
for(int nx = 0; nx < 2; nx++) {
cache[threadIdx.y][threadIdx.x + nx * TX]
= get_pix(src, p.w, p.h, x - MAX_KERNEL_RADIUS + nx * TX, y);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x + k];
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x - k];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static float gaussian(float sigma, float x) {
const float e = x / sigma;
return exp(-0.5 * e * e);
}
int main(int argn, char ** argv) {
kernel_params params;
if(argn < 4) {
usage("Wrong argument count", *argv);
}
// read width and height
params.w = atoi(argv[1]);
params.h = atoi(argv[2]);
if(params.w < 1 || params.h < 1) {
usage("Both width and height must be positive integers", *argv);
}
const int pix_count = params.w * params.h;
// read sigma and prepare normalized kernel (sum = 1)
const float sigma = atof(argv[3]);
float kernel_sum = 0.0f;
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
kernel_sum += params.kernel[k] = gaussian(sigma, k);
}
kernel_sum = 2.0 * kernel_sum - params.kernel[0];
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
params.kernel[k] /= kernel_sum;
}
// dump the kernel
printf("Convolution kernel:");
for(int k = -MAX_KERNEL_RADIUS; k <= MAX_KERNEL_RADIUS; k++) {
printf(" %f", params.kernel[k < 0 ? -k : k]);
}
printf("\n");
// prepare buffers
uint8_t * const data_ptr = (uint8_t*)malloc(pix_count);
uint8_t * data_gpu_ptr[2];
uint8_t * temp_gpu_ptr[2];
cudaMalloc((void**)(data_gpu_ptr + 0), pix_count);
cudaMalloc((void**)(temp_gpu_ptr + 0), pix_count);
cudaMalloc((void**)(data_gpu_ptr + 1), pix_count);
cudaMalloc((void**)(temp_gpu_ptr + 1), pix_count);
// two CUDA streams for asynchronous kernel and data transfers
cudaStream_t streams[2];
cudaStreamCreate(streams + 0);
cudaStreamCreate(streams + 1);
// measure time of processing of all images
const double begin = timer_ms();
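// Software pipeline over the image list using the two streams/buffer pairs above:
// each image is uploaded in one iteration, convolved in the next, and written out
// in the one after that, so host<->device transfers overlap with kernel execution.
// The first and last iterations only fill and drain the pipeline.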
for(int i = 3; i <= argn; i++) {
// index of I/O buffers in this iteration
const int io_idx = i & 1;
// index of computing resources in this iteration
const int comp_idx = io_idx ^ 1;
// start processing the image loaded in the previous iteration
// (except in the first and last iterations)
if(i > 3 && i < argn) {
// launch vertical and horizontal pass
dim3 block(TX, TY);
dim3 grid((params.w + TX - 1) / TX, (params.h + TY - 1) / TY);
convolution_vertical<<<grid, block, 0, streams[comp_idx]>>>
(params, data_gpu_ptr[comp_idx], temp_gpu_ptr[comp_idx]);
convolution_horizontal<<<grid, block, 0, streams[comp_idx]>>>
(params, temp_gpu_ptr[comp_idx], data_gpu_ptr[comp_idx]);
}
// processing now runs asynchronously on the GPU => save results
// from the previous iteration (except in the first two iterations)
if(i > 4) {
// copy data back from GPU
cudaMemcpyAsync(data_ptr, data_gpu_ptr[io_idx], pix_count,
cudaMemcpyDeviceToHost, streams[io_idx]);
// compose output filename
char out_path[MAX_PATH_LEN + 1];
snprintf(out_path, MAX_PATH_LEN, "%s.out.gray", argv[i - 1]);
// wait for the data to actually appear in the buffer
cudaStreamSynchronize(streams[io_idx]);
// write data to output file
FILE * const out_file = fopen(out_path, "wb");
if(NULL == out_file || 1 != fwrite(data_ptr, pix_count, 1, out_file)) {
error(out_path);
}
fclose(out_file);
}
// load input for the next iteration (except in the last two iterations)
if(i < (argn - 1)) {
// read input file
printf("Processing '%s'\n", argv[i + 1]);
FILE * const src_file = fopen(argv[i + 1], "rb");
if(NULL == src_file || 1 != fread(data_ptr, pix_count, 1, src_file)) {
error(argv[i + 1]);
}
fclose(src_file);
// copy data to GPU memory
cudaMemcpyAsync(data_gpu_ptr[io_idx], data_ptr, pix_count,
cudaMemcpyHostToDevice, streams[io_idx]);
// make sure that the buffer is ready for next iteration
cudaStreamSynchronize(streams[io_idx]);
}
}
const double end = timer_ms();
// print total time
printf("time: %f ms, %d images => %f ms/image\n",
end - begin, argn - 4, (end - begin) / (argn - 4));
// cleanup
free(data_ptr);
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
cudaFree(data_gpu_ptr[0]);
cudaFree(temp_gpu_ptr[0]);
cudaFree(data_gpu_ptr[1]);
cudaFree(temp_gpu_ptr[1]);
return 0;
}
|
d91c0e635e9694da1f758e9ad461c39ed4aa7189.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
int device = -1;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipGetDevice(&device);
// Prefetch unified memory to the GPU
hipMemPrefetchAsync(x, N*sizeof(float), device, NULL);
hipMemPrefetchAsync(y, N*sizeof(float), device, NULL);
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Prefetch the result back to the host
hipMemPrefetchAsync(y, N*sizeof(float), hipCpuDeviceId, NULL);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| d91c0e635e9694da1f758e9ad461c39ed4aa7189.cu | #include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
int device = -1;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaGetDevice(&device);
// Prefetch unified memory to the GPU
cudaMemPrefetchAsync(x, N*sizeof(float), device, NULL);
cudaMemPrefetchAsync(y, N*sizeof(float), device, NULL);
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Prefetch the result back to the host
cudaMemPrefetchAsync(y, N*sizeof(float), cudaCpuDeviceId, NULL);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
4bcd93d272a6c8a4116fbc7f88cbc1e8a40fcce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudev.hpp>
#include <iostream>
#include "mbb_blend.h"
using namespace cv::cuda;
using cv::cudev::divUp;
using namespace std;
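// Note: the kernels below map the thread/block x-dimension to image rows (y) and the
// y-dimension to columns (x), matching the dim3(1, 512) block shape used in
// BlendPyramidLayers.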
__global__ void BlendLayer(PtrStepSz<float3> image,
PtrStepSz<float> weight,
PtrStepSz<float3> dst_image,
int blend_w, int blend_h) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= blend_w || y >= blend_h) {
return;
}
dst_image(y, x).x = dst_image(y, x).x + image(y, x).x * weight(y, x);
dst_image(y, x).y = dst_image(y, x).y + image(y, x).y * weight(y, x);
dst_image(y, x).z = dst_image(y, x).z + image(y, x).z * weight(y, x);
}
__global__ void NormalizeLayer(PtrStepSz<float3> dst_image,
PtrStepSz<float> weight,
int blend_w, int blend_h) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= blend_w || y >= blend_h) {
return;
}
dst_image(y, x).x /= (weight(y, x) + BLEND_WEIGHT_EPS);
dst_image(y, x).y /= (weight(y, x) + BLEND_WEIGHT_EPS);
dst_image(y, x).z /= (weight(y, x) + BLEND_WEIGHT_EPS);
}
void MultiBandBlend::BlendPyramidLayers() {
const dim3 thread_size_2d = dim3(1, 512);
for (int layer = 0; layer <= band_num; layer++) {
int blend_w = d_dst_laplace_pyr[layer].cols;
int blend_h = d_dst_laplace_pyr[layer].rows;
d_dst_laplace_pyr[layer].setTo(cv::Scalar::all(0));
const dim3 blend_blocks = dim3(divUp(blend_h, thread_size_2d.x), divUp(blend_w, thread_size_2d.y));
for (int iter = 0; iter < stitch_num; iter++) {
BlendLayer << < blend_blocks, thread_size_2d >> > (d_src_laplace_pyr[iter][layer],
d_src_weight_pyr[iter][layer],
d_dst_laplace_pyr[layer],
blend_w, blend_h);
hipDeviceSynchronize();
}
NormalizeLayer << < blend_blocks, thread_size_2d >> > (d_dst_laplace_pyr[layer],
d_dst_weight_pyr[layer],
blend_w, blend_h);
hipDeviceSynchronize();
}
}
| 4bcd93d272a6c8a4116fbc7f88cbc1e8a40fcce0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudev.hpp>
#include <iostream>
#include "mbb_blend.h"
using namespace cv::cuda;
using cv::cudev::divUp;
using namespace std;
__global__ void BlendLayer(PtrStepSz<float3> image,
PtrStepSz<float> weight,
PtrStepSz<float3> dst_image,
int blend_w, int blend_h) {
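// The launch in BlendPyramidLayers uses dim3(1, 512) thread blocks, so the x grid dimension
// walks image rows (y) while the y dimension walks columns (x).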
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= blend_w || y >= blend_h) {
return;
}
dst_image(y, x).x = dst_image(y, x).x + image(y, x).x * weight(y, x);
dst_image(y, x).y = dst_image(y, x).y + image(y, x).y * weight(y, x);
dst_image(y, x).z = dst_image(y, x).z + image(y, x).z * weight(y, x);
}
__global__ void NormalizeLayer(PtrStepSz<float3> dst_image,
PtrStepSz<float> weight,
int blend_w, int blend_h) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int x = blockDim.y * blockIdx.y + threadIdx.y;
if (x < 0 || y < 0 || x >= blend_w || y >= blend_h) {
return;
}
dst_image(y, x).x /= (weight(y, x) + BLEND_WEIGHT_EPS);
dst_image(y, x).y /= (weight(y, x) + BLEND_WEIGHT_EPS);
dst_image(y, x).z /= (weight(y, x) + BLEND_WEIGHT_EPS);
}
void MultiBandBlend::BlendPyramidLayers() {
const dim3 thread_size_2d = dim3(1, 512);
for (int layer = 0; layer <= band_num; layer++) {
int blend_w = d_dst_laplace_pyr[layer].cols;
int blend_h = d_dst_laplace_pyr[layer].rows;
d_dst_laplace_pyr[layer].setTo(cv::Scalar::all(0));
const dim3 blend_blocks = dim3(divUp(blend_h, thread_size_2d.x), divUp(blend_w, thread_size_2d.y));
for (int iter = 0; iter < stitch_num; iter++) {
BlendLayer << < blend_blocks, thread_size_2d >> > (d_src_laplace_pyr[iter][layer],
d_src_weight_pyr[iter][layer],
d_dst_laplace_pyr[layer],
blend_w, blend_h);
cudaDeviceSynchronize();
}
NormalizeLayer << < blend_blocks, thread_size_2d >> > (d_dst_laplace_pyr[layer],
d_dst_weight_pyr[layer],
blend_w, blend_h);
cudaDeviceSynchronize();
}
}
|
8b886b318f404cfb38725fe02344be4dfef0b794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
__global__ void vector_addition_kernel(int* A, int* B, int* result, int vLen) {
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
if (threadId < vLen)
result[threadId] = A[threadId] + B[threadId];
}
class VectorAdditionUnifiedMemory {
public:
VectorAdditionUnifiedMemory(int, int, int);
void run(bool);
private:
int vectorLength, threadBlockSize, gridSize;
void initVector(int*, int);
void displayResult(int*, int*, int*, int);
void checkResult(int*, int*, int*, int);
};
VectorAdditionUnifiedMemory::VectorAdditionUnifiedMemory(int vectorLength, int threadBlockSize, int gridSize) {
this->vectorLength = vectorLength;
this->threadBlockSize = threadBlockSize;
this->gridSize = gridSize;
}
void VectorAdditionUnifiedMemory::initVector(int* vector, int vLen) {
for (int i = 0; i < vLen; i++)
vector[i] = rand() % 100;
}
void VectorAdditionUnifiedMemory::displayResult(int* A, int* B, int* result, int vLen) {
for (int i = 0; i < vLen; i++)
printf("%d + %d = %d\n", A[i], B[i], result[i]);
printf("\n");
}
void VectorAdditionUnifiedMemory::checkResult(int* A, int* B, int* result, int vLen) {
for (int i = 0; i < vLen; i++)
assert(result[i] == A[i] + B[i]);
}
void VectorAdditionUnifiedMemory::run(bool prefetchMemory) {
int deviceId = 0;
hipGetDevice(&deviceId);
printf("GPU Device ID: %d\n", deviceId);
printf("CPU Device ID: %d\n\n", hipCpuDeviceId);
int * vectorA, * vectorB, * vectorResult;
size_t vectorBytes = sizeof(int) * vectorLength;
hipMallocManaged(&vectorA, vectorBytes);
hipMallocManaged(&vectorB, vectorBytes);
hipMallocManaged(&vectorResult, vectorBytes);
initVector(vectorA, vectorLength);
initVector(vectorB, vectorLength);
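// Optionally prefetch the managed input buffers to the GPU so the kernel does not have to
// migrate pages on demand during execution.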
if (prefetchMemory) {
hipMemPrefetchAsync(vectorA, vectorBytes, deviceId);
hipMemPrefetchAsync(vectorB, vectorBytes, deviceId);
}
hipLaunchKernelGGL(( vector_addition_kernel), dim3(gridSize), dim3(threadBlockSize), 0, 0, vectorA, vectorB, vectorResult, vectorLength);
hipDeviceSynchronize();
if (prefetchMemory)
hipMemPrefetchAsync(vectorResult, vectorBytes, hipCpuDeviceId);
if (vectorLength <= 1 << 4)
displayResult(vectorA, vectorB, vectorResult, vectorLength);
else {
checkResult(vectorA, vectorB, vectorResult, vectorLength);
printf("Program Successfully Executed");
}
hipFree(vectorA);
hipFree(vectorB);
hipFree(vectorResult);
}
int main() {
int vectorLength = 1 << 16;
int threadBlockSize = 1 << 10;
int gridSize = (vectorLength + threadBlockSize - 1) / threadBlockSize;
VectorAdditionUnifiedMemory program = VectorAdditionUnifiedMemory(vectorLength, threadBlockSize, gridSize);
program.run(true);
}
| 8b886b318f404cfb38725fe02344be4dfef0b794.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
__global__ void vector_addition_kernel(int* A, int* B, int* result, int vLen) {
int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
if (threadId < vLen)
result[threadId] = A[threadId] + B[threadId];
}
class VectorAdditionUnifiedMemory {
public:
VectorAdditionUnifiedMemory(int, int, int);
void run(bool);
private:
int vectorLength, threadBlockSize, gridSize;
void initVector(int*, int);
void displayResult(int*, int*, int*, int);
void checkResult(int*, int*, int*, int);
};
VectorAdditionUnifiedMemory::VectorAdditionUnifiedMemory(int vectorLength, int threadBlockSize, int gridSize) {
this->vectorLength = vectorLength;
this->threadBlockSize = threadBlockSize;
this->gridSize = gridSize;
}
void VectorAdditionUnifiedMemory::initVector(int* vector, int vLen) {
for (int i = 0; i < vLen; i++)
vector[i] = rand() % 100;
}
void VectorAdditionUnifiedMemory::displayResult(int* A, int* B, int* result, int vLen) {
for (int i = 0; i < vLen; i++)
printf("%d + %d = %d\n", A[i], B[i], result[i]);
printf("\n");
}
void VectorAdditionUnifiedMemory::checkResult(int* A, int* B, int* result, int vLen) {
for (int i = 0; i < vLen; i++)
assert(result[i] == A[i] + B[i]);
}
void VectorAdditionUnifiedMemory::run(bool prefetchMemory) {
int deviceId = 0;
cudaGetDevice(&deviceId);
printf("GPU Device ID: %d\n", deviceId);
printf("CPU Device ID: %d\n\n", cudaCpuDeviceId);
int * vectorA, * vectorB, * vectorResult;
size_t vectorBytes = sizeof(int) * vectorLength;
cudaMallocManaged(&vectorA, vectorBytes);
cudaMallocManaged(&vectorB, vectorBytes);
cudaMallocManaged(&vectorResult, vectorBytes);
initVector(vectorA, vectorLength);
initVector(vectorB, vectorLength);
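// Optionally prefetch the managed input buffers to the GPU so the kernel does not have to
// migrate pages on demand during execution.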
if (prefetchMemory) {
cudaMemPrefetchAsync(vectorA, vectorBytes, deviceId);
cudaMemPrefetchAsync(vectorB, vectorBytes, deviceId);
}
vector_addition_kernel<<<gridSize, threadBlockSize>>>(vectorA, vectorB, vectorResult, vectorLength);
cudaDeviceSynchronize();
if (prefetchMemory)
cudaMemPrefetchAsync(vectorResult, vectorBytes, cudaCpuDeviceId);
if (vectorLength <= 1 << 4)
displayResult(vectorA, vectorB, vectorResult, vectorLength);
else {
checkResult(vectorA, vectorB, vectorResult, vectorLength);
printf("Program Successfully Executed");
}
cudaFree(vectorA);
cudaFree(vectorB);
cudaFree(vectorResult);
}
int main() {
int vectorLength = 1 << 16;
int threadBlockSize = 1 << 10;
int gridSize = (vectorLength + threadBlockSize - 1) / threadBlockSize;
VectorAdditionUnifiedMemory program = VectorAdditionUnifiedMemory(vectorLength, threadBlockSize, gridSize);
program.run(true);
}
|
5145e0dea975c7e01aa64624e71ce174dfd977b0.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#define BLOCK_SIZE 8
#define THREAD_SIZE 8
#define INF 599999999
__global__ void apspKernel(int N, int k, int *g_idata, int *g_odata) {
// access thread id
const unsigned int tid = threadIdx.x;
// access block id
const unsigned int bid = blockIdx.x;
// access number of threads in this block
const unsigned int bdim = blockDim.x;
const unsigned int i = (bid * bdim + tid)/N;
const unsigned int j = (bid * bdim + tid)%N;
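// Floyd-Warshall relaxation through intermediate vertex k; -1 encodes "no path yet".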
if (g_idata[i*N+k] == -1 || g_idata[k*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+j];
else if (g_idata[i*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+k]+g_idata[k*N+j];
else g_odata[i*N+j] = min(g_idata[i*N+j], g_idata[i*N+k]+g_idata[k*N+j]);
}
void par_apsp(int N, int *mat) {
//copy mat from host to device memory d_mat
int* d_mat;
int* d_mat_out;
int size = sizeof(int) * N * N;
hipMalloc((void**) &d_mat, size);
hipMemcpy(d_mat, mat, size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_mat_out, size);
for (int k = 0; k < N; k++) {
hipLaunchKernelGGL(( apspKernel), dim3((N*N + 255)/256), dim3(256), 0, 0, N, k, d_mat, d_mat_out);
hipMemcpy(d_mat, d_mat_out, size, hipMemcpyDeviceToDevice);
}
hipMemcpy(mat, d_mat, size, hipMemcpyDeviceToHost);
}
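// Blocked (tiled) Floyd-Warshall: phase 1 relaxes the diagonal (primary) block for the current
// iteration, phase 2 relaxes the blocks that share a row or column with it, and phase 3 relaxes
// all remaining blocks using the results of the first two phases.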
__global__ void kernel_phase_one(const unsigned int block,
const unsigned int N,
int * const d) {
int i;
int newPath;
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int v1 = VIRTUAL_BLOCK_SIZE * block + ty;
const int v2 = VIRTUAL_BLOCK_SIZE * block + tx;
const int cell = v1 * N + v2;
__shared__ int primary_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
__shared__ int primary_p[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
if (v1 < N && v2 < N) primary_d[ty][tx] = d[cell];
else primary_d[ty][tx] = INF;
// Synchronize to make sure the all value are loaded in block
__syncthreads();
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (primary_d[ty][i] != -1 && primary_d[i][tx] != -1) {
newPath = primary_d[ty][i] + primary_d[i][tx];
if (newPath < primary_d[ty][tx] || primary_d[ty][tx] == -1) primary_d[ty][tx] = newPath;
}
}
if (v1 < N && v2 < N) {
d[cell] = primary_d[ty][tx];
}
}
__global__ void kernel_phase_two(const unsigned int block,
const unsigned int N,
int * const d) {
if (blockIdx.x == block) return;
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
int i;
int newPath;
int tx = threadIdx.x;
int ty = threadIdx.y;
int v1 = VIRTUAL_BLOCK_SIZE * block + ty;
int v2 = VIRTUAL_BLOCK_SIZE * block + tx;
__shared__ int primary_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
__shared__ int current_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
const int cell_primary = v1 * N + v2;
if (v1 < N && v2 < N) primary_d[ty][tx] = d[cell_primary];
else primary_d[ty][tx] = INF;
// Load i-aligned singly dependent blocks
if (blockIdx.y == 0) {
v1 = VIRTUAL_BLOCK_SIZE * block + ty;
v2 = VIRTUAL_BLOCK_SIZE * blockIdx.x + tx;
}
// Load j-aligned singly dependent blocks
else {
v1 = VIRTUAL_BLOCK_SIZE * blockIdx.x + ty;
v2 = VIRTUAL_BLOCK_SIZE * block + tx;
}
const int cell_current = v1 * N + v2;
if (v1 < N && v2 < N) current_d[ty][tx] = d[cell_current];
else current_d[ty][tx] = INF;
// Synchronize to make sure the all value are loaded in block
__syncthreads();
// Compute i-aligned singly dependent blocks
if (blockIdx.y == 0)
{
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (primary_d[ty][i] != -1 && current_d[i][tx] != -1) {
newPath = primary_d[ty][i] + current_d[i][tx];
if (newPath < current_d[ty][tx] || current_d[ty][tx] == -1) current_d[ty][tx] = newPath;
}
}
}
// Compute j-aligned singly dependent blocks
else {
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (current_d[ty][i] != -1 && primary_d[i][tx] != -1) {
newPath = current_d[ty][i] + primary_d[i][tx];
if (newPath < current_d[ty][tx] || current_d[ty][tx] == -1) current_d[ty][tx] = newPath;
}
}
}
if (v1 < N && v2 < N) d[cell_current] = current_d[ty][tx];
}
__global__ void kernel_phase_three(unsigned int block,
const unsigned int N,
int * const d) {
if (blockIdx.x == block || blockIdx.y == block) return;
int i, j, k;
int newPath;
int path;
const int tx = threadIdx.x * THREAD_SIZE;
const int ty = threadIdx.y * THREAD_SIZE;
const int v1 = blockDim.y * blockIdx.y * THREAD_SIZE + ty;
const int v2 = blockDim.x * blockIdx.x * THREAD_SIZE + tx;
int idx, idy;
__shared__ int primaryRow_d[BLOCK_SIZE * THREAD_SIZE][BLOCK_SIZE * THREAD_SIZE];
__shared__ int primaryCol_d[BLOCK_SIZE * THREAD_SIZE][BLOCK_SIZE * THREAD_SIZE];
int v1Row = BLOCK_SIZE * block * THREAD_SIZE + ty;
int v2Col = BLOCK_SIZE * block * THREAD_SIZE + tx;
for (i=0; i<THREAD_SIZE; i++) {
for(j=0; j<THREAD_SIZE; j++) {
idx = tx + j;
idy = ty + i;
if (v1Row + i < N && v2 + j < N) {
block = (v1Row + i) * N + v2 + j;
primaryRow_d[idy][idx] = d[block];
}
else {
primaryRow_d[idy][idx] = INF;
}
if (v1 + i < N && v2Col + j < N) {
block = (v1 + i) * N + v2Col + j;
primaryCol_d[idy][idx] = d[block];
}
else {
primaryCol_d[idy][idx] = INF;
}
}
}
// Synchronize to make sure the all value are loaded in virtual block
__syncthreads();
for (i=0; i<THREAD_SIZE; i++) {
for (j=0; j<THREAD_SIZE; j++) {
if (v1 + i < N && v2 + j < N) {
block = (v1 + i) * N + v2 + j;
path = d[block];
idy = ty + i;
idx = tx + j;
for (k=0; k<BLOCK_SIZE * THREAD_SIZE; k++) {
if (primaryCol_d[idy][k] != -1 && primaryRow_d[k][idx] != -1) {
newPath = primaryCol_d[idy][k] + primaryRow_d[k][idx];
if (path > newPath || path == -1) {
path = newPath;
}
}
}
d[block] = path;
}
}
}
}
void par_apsp_blocked_processing(int N, int *mat) {
//copy mat from host to device memory d_mat
int* d_mat;
int size = sizeof(int) * N * N;
hipMalloc((void**) &d_mat, size);
hipMemcpy(d_mat, mat, size, hipMemcpyHostToDevice);
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
// Initialize the grid and block dimensions here
dim3 dimGridP1(1, 1, 1);
dim3 dimGridP2((N - 1) / VIRTUAL_BLOCK_SIZE + 1, 2 , 1);
dim3 dimGridP3((N - 1) / VIRTUAL_BLOCK_SIZE + 1, (N - 1) / VIRTUAL_BLOCK_SIZE + 1, 1);
dim3 dimBlockP1(VIRTUAL_BLOCK_SIZE, VIRTUAL_BLOCK_SIZE, 1);
dim3 dimBlockP2(VIRTUAL_BLOCK_SIZE, VIRTUAL_BLOCK_SIZE, 1);
dim3 dimBlockP3(BLOCK_SIZE, BLOCK_SIZE, 1);
int numOfBlock = (N - 1) / VIRTUAL_BLOCK_SIZE;
for (int block = 0; block <= numOfBlock; block++) {
hipLaunchKernelGGL(( kernel_phase_one), dim3(1), dim3(dimBlockP1), 0, 0, block, N, d_mat);
hipLaunchKernelGGL(( kernel_phase_two), dim3(dimGridP2), dim3(dimBlockP2), 0, 0, block, N, d_mat);
hipLaunchKernelGGL(( kernel_phase_three), dim3(dimGridP3), dim3(dimBlockP3), 0, 0, block, N, d_mat);
}
hipMemcpy(mat, d_mat, size, hipMemcpyDeviceToHost);
}
| 5145e0dea975c7e01aa64624e71ce174dfd977b0.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#define BLOCK_SIZE 8
#define THREAD_SIZE 8
#define INF 599999999
__global__ void apspKernel(int N, int k, int *g_idata, int *g_odata) {
// access thread id
const unsigned int tid = threadIdx.x;
// access block id
const unsigned int bid = blockIdx.x;
// access number of threads in this block
const unsigned int bdim = blockDim.x;
const unsigned int i = (bid * bdim + tid)/N;
const unsigned int j = (bid * bdim + tid)%N;
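// Floyd-Warshall relaxation through intermediate vertex k; -1 encodes "no path yet".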
if (g_idata[i*N+k] == -1 || g_idata[k*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+j];
else if (g_idata[i*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+k]+g_idata[k*N+j];
else g_odata[i*N+j] = min(g_idata[i*N+j], g_idata[i*N+k]+g_idata[k*N+j]);
}
void par_apsp(int N, int *mat) {
//copy mat from host to device memory d_mat
int* d_mat;
int* d_mat_out;
int size = sizeof(int) * N * N;
cudaMalloc((void**) &d_mat, size);
cudaMemcpy(d_mat, mat, size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_mat_out, size);
for (int k = 0; k < N; k++) {
apspKernel<<<(N*N + 255)/256, 256>>>(N, k, d_mat, d_mat_out);
cudaMemcpy(d_mat, d_mat_out, size, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(mat, d_mat, size, cudaMemcpyDeviceToHost);
}
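// Blocked (tiled) Floyd-Warshall: phase 1 relaxes the diagonal (primary) block for the current
// iteration, phase 2 relaxes the blocks that share a row or column with it, and phase 3 relaxes
// all remaining blocks using the results of the first two phases.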
__global__ void kernel_phase_one(const unsigned int block,
const unsigned int N,
int * const d) {
int i;
int newPath;
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int v1 = VIRTUAL_BLOCK_SIZE * block + ty;
const int v2 = VIRTUAL_BLOCK_SIZE * block + tx;
const int cell = v1 * N + v2;
__shared__ int primary_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
__shared__ int primary_p[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
if (v1 < N && v2 < N) primary_d[ty][tx] = d[cell];
else primary_d[ty][tx] = INF;
// Synchronize to make sure the all value are loaded in block
__syncthreads();
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (primary_d[ty][i] != -1 && primary_d[i][tx] != -1) {
newPath = primary_d[ty][i] + primary_d[i][tx];
if (newPath < primary_d[ty][tx] || primary_d[ty][tx] == -1) primary_d[ty][tx] = newPath;
}
}
if (v1 < N && v2 < N) {
d[cell] = primary_d[ty][tx];
}
}
__global__ void kernel_phase_two(const unsigned int block,
const unsigned int N,
int * const d) {
if (blockIdx.x == block) return;
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
int i;
int newPath;
int tx = threadIdx.x;
int ty = threadIdx.y;
int v1 = VIRTUAL_BLOCK_SIZE * block + ty;
int v2 = VIRTUAL_BLOCK_SIZE * block + tx;
__shared__ int primary_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
__shared__ int current_d[VIRTUAL_BLOCK_SIZE][VIRTUAL_BLOCK_SIZE];
const int cell_primary = v1 * N + v2;
if (v1 < N && v2 < N) primary_d[ty][tx] = d[cell_primary];
else primary_d[ty][tx] = INF;
// Load i-aligned singly dependent blocks
if (blockIdx.y == 0) {
v1 = VIRTUAL_BLOCK_SIZE * block + ty;
v2 = VIRTUAL_BLOCK_SIZE * blockIdx.x + tx;
}
// Load j-aligned singly dependent blocks
else {
v1 = VIRTUAL_BLOCK_SIZE * blockIdx.x + ty;
v2 = VIRTUAL_BLOCK_SIZE * block + tx;
}
const int cell_current = v1 * N + v2;
if (v1 < N && v2 < N) current_d[ty][tx] = d[cell_current];
else current_d[ty][tx] = INF;
// Synchronize to make sure the all value are loaded in block
__syncthreads();
// Compute i-aligned singly dependent blocks
if (blockIdx.y == 0)
{
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (primary_d[ty][i] != -1 && current_d[i][tx] != -1) {
newPath = primary_d[ty][i] + current_d[i][tx];
if (newPath < current_d[ty][tx] || current_d[ty][tx] == -1) current_d[ty][tx] = newPath;
}
}
}
// Compute j-aligned singly dependent blocks
else {
for (i=0; i<VIRTUAL_BLOCK_SIZE; i++) {
if (current_d[ty][i] != -1 && primary_d[i][tx] != -1) {
newPath = current_d[ty][i] + primary_d[i][tx];
if (newPath < current_d[ty][tx] || current_d[ty][tx] == -1) current_d[ty][tx] = newPath;
}
}
}
if (v1 < N && v2 < N) d[cell_current] = current_d[ty][tx];
}
__global__ void kernel_phase_three(unsigned int block,
const unsigned int N,
int * const d) {
if (blockIdx.x == block || blockIdx.y == block) return;
int i, j, k;
int newPath;
int path;
const int tx = threadIdx.x * THREAD_SIZE;
const int ty = threadIdx.y * THREAD_SIZE;
const int v1 = blockDim.y * blockIdx.y * THREAD_SIZE + ty;
const int v2 = blockDim.x * blockIdx.x * THREAD_SIZE + tx;
int idx, idy;
__shared__ int primaryRow_d[BLOCK_SIZE * THREAD_SIZE][BLOCK_SIZE * THREAD_SIZE];
__shared__ int primaryCol_d[BLOCK_SIZE * THREAD_SIZE][BLOCK_SIZE * THREAD_SIZE];
int v1Row = BLOCK_SIZE * block * THREAD_SIZE + ty;
int v2Col = BLOCK_SIZE * block * THREAD_SIZE + tx;
for (i=0; i<THREAD_SIZE; i++) {
for(j=0; j<THREAD_SIZE; j++) {
idx = tx + j;
idy = ty + i;
if (v1Row + i < N && v2 + j < N) {
block = (v1Row + i) * N + v2 + j;
primaryRow_d[idy][idx] = d[block];
}
else {
primaryRow_d[idy][idx] = INF;
}
if (v1 + i < N && v2Col + j < N) {
block = (v1 + i) * N + v2Col + j;
primaryCol_d[idy][idx] = d[block];
}
else {
primaryCol_d[idy][idx] = INF;
}
}
}
// Synchronize to make sure the all value are loaded in virtual block
__syncthreads();
for (i=0; i<THREAD_SIZE; i++) {
for (j=0; j<THREAD_SIZE; j++) {
if (v1 + i < N && v2 + j < N) {
block = (v1 + i) * N + v2 + j;
path = d[block];
idy = ty + i;
idx = tx + j;
for (k=0; k<BLOCK_SIZE * THREAD_SIZE; k++) {
if (primaryCol_d[idy][k] != -1 && primaryRow_d[k][idx] != -1) {
newPath = primaryCol_d[idy][k] + primaryRow_d[k][idx];
if (path > newPath || path == -1) {
path = newPath;
}
}
}
d[block] = path;
}
}
}
}
void par_apsp_blocked_processing(int N, int *mat) {
//copy mat from host to device memory d_mat
int* d_mat;
int size = sizeof(int) * N * N;
cudaMalloc((void**) &d_mat, size);
cudaMemcpy(d_mat, mat, size, cudaMemcpyHostToDevice);
const int VIRTUAL_BLOCK_SIZE = BLOCK_SIZE * THREAD_SIZE;
// Initialize the grid and block dimensions here
dim3 dimGridP1(1, 1, 1);
dim3 dimGridP2((N - 1) / VIRTUAL_BLOCK_SIZE + 1, 2 , 1);
dim3 dimGridP3((N - 1) / VIRTUAL_BLOCK_SIZE + 1, (N - 1) / VIRTUAL_BLOCK_SIZE + 1, 1);
dim3 dimBlockP1(VIRTUAL_BLOCK_SIZE, VIRTUAL_BLOCK_SIZE, 1);
dim3 dimBlockP2(VIRTUAL_BLOCK_SIZE, VIRTUAL_BLOCK_SIZE, 1);
dim3 dimBlockP3(BLOCK_SIZE, BLOCK_SIZE, 1);
int numOfBlock = (N - 1) / VIRTUAL_BLOCK_SIZE;
for (int block = 0; block <= numOfBlock; block++) {
kernel_phase_one<<<1, dimBlockP1>>>(block, N, d_mat);
kernel_phase_two<<<dimGridP2, dimBlockP2>>>(block, N, d_mat);
kernel_phase_three<<<dimGridP3, dimBlockP3>>>(block, N, d_mat);
}
cudaMemcpy(mat, d_mat, size, cudaMemcpyDeviceToHost);
}
|
5e9cb2dde7cd929b594d3a9e9e4f8c6888646c1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "heat2d.h"
void Manage_Devices(){
// we set devices before MPI init to make sure MPI is aware of the cuda devices!
char * localRankStr = NULL;
int rank = 0, devCount = 0;
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK)) != NULL) { rank = atoi(localRankStr); }
hipGetDeviceCount(&devCount);
//hipSetDevice(rank/devCount);
hipSetDevice(rank);
printf("number of devices found: %d\n",devCount);
}
dmn Manage_Domain(int rank, int npcs, int gpu){
// allocate sub-domain for a one-dimensional domain decomposition in the Y-direction
dmn domain;
domain.gpu = gpu;
domain.rank = rank;
domain.npcs = npcs;
domain.nx = NX/1;
domain.ny = NY/SY;
domain.size = domain.nx*domain.ny;
domain.rx = 0;
domain.ry = rank;
hipSetDevice(domain.gpu);
// Have process 0 print out some information.
if (rank==ROOT) {
printf ("HEAT_MPI:\n\n" );
printf (" C++/MPI version\n" );
printf (" Solve the 2D time-dependent heat equation.\n\n" );
}
// Print welcome message
printf (" Commence Simulation: cpu rank %d out of %d cores with GPU(%d)"
" working with (%d +0) x (%d +%d) cells\n",rank,npcs,gpu,domain.nx,domain.ny,2*R);
// return the domain structure
return domain;
}
void Manage_Memory(int phase, dmn domain, real **h_u, real **t_u, real **d_u, real **d_un){
size_t global = NY*NX*sizeof(real);
size_t local = (domain.nx+0*R)*(domain.ny+2*R)*sizeof(real);
hipError_t Error;
if (phase==0) {
// Allocate global domain on ROOT
if (domain.rank==ROOT) *h_u=(real*)malloc(global);
// Allocate local domains on MPI threads with 2 extra slots for halo regions
*t_u =(real*)malloc(local);
// Allocate local domains on devices with 2 extra slots for halo regions
Error = hipSetDevice(domain.gpu); if (DEBUG) printf("CUDA error (hipSetDevice) = %s\n",hipGetErrorString(Error));
Error = hipMalloc((void**)d_u ,local); if (DEBUG) printf("CUDA error (hipMalloc d_u) = %s\n",hipGetErrorString(Error));
Error = hipMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (hipMalloc d_un) = %s\n",hipGetErrorString(Error));
}
if (phase==1){
// Free local domain variable on device
//Error = hipSetDevice(domain.gpu); if (DEBUG) printf("CUDA error (hipSetDevice) = %s\n",hipGetErrorString(Error));
Error = hipFree(*d_u ); if (DEBUG) printf("CUDA error (hipFree d_u) = %s\n",hipGetErrorString(Error));
Error = hipFree(*d_un); if (DEBUG) printf("CUDA error (hipFree d_un) = %s\n",hipGetErrorString(Error));
// Free the local domain on host
free(*t_u);
}
if (phase==2) {
// Free global domain on ROOT
if (domain.rank==ROOT) free(*h_u);
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
void Call_IC(const int IC, real * __restrict u0){
int i, j, o;
switch (IC) {
case 1: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
}
}
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX-1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX-1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY-1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY-1); // right
}
}
break;
}
case 3: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
}
}
break;
}
// here to add another IC
}
}
void Save_Results(real *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,u[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
void Print_SubDomain(dmn domain, real *u){
// print result to terminal
for (int j = 0; j < domain.ny+2*R; j++) {
for (int i = 0; i < domain.nx+2*0; i++) {
printf("%1.2f ",u[i+domain.nx*j]);
}
printf("\n");
}
printf("\n");
}
void Print_Domain(dmn domain, real *u){
// print result to terminal
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
printf("%1.2f ",u[i+NX*j]);
}
printf("\n");
}
printf("\n");
}
__global__ void Set_DirichletBC(const int m, const int rank, real * __restrict__ u){
// Threads id
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<NX) {
if (rank==0) { /* bottom BC */
float u_bl = 0.7f;
float u_br = 1.0f;
int j = 1;
u[i+NX*j] = u_bl + (u_br-u_bl)*i/(NX-1);
}
if (rank==SY-1) { /* top BC */
float u_tl = 0.7f;
float u_tr = 1.0f;
int j = m;
u[i+NX*j] = u_tl + (u_tr-u_tl)*i/(NX-1);
}
}
}
void Manage_Comms(int phase, dmn domain, real **t_u, real **d_u) {
hipError_t Error;
if (phase==0) {
// Send local domains to their associated GPU
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d) :::\n",phase);
Error=hipMemcpy(*d_u,*t_u,(domain.nx+0*R)*(domain.ny+2*R)*sizeof(real),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
}
if (phase==1) {
// Communicate halo regions
const int n = domain.nx;
const int m = domain.ny;
MPI_Status status;
MPI_Request rqSendUp, rqSendDown, rqRecvUp, rqRecvDown;
// Impose BCs!
int blockSize = 256, gridSize = 1+(n-1)/blockSize;
hipLaunchKernelGGL(( Set_DirichletBC), dim3(gridSize),dim3(blockSize), 0, 0, m,domain.ry,*d_u);
// Communicate halo regions
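// Device pointers are handed directly to MPI_Isend/MPI_Irecv, which assumes a CUDA-aware MPI build.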
if (domain.ry <SY-1) {
MPI_Isend(*d_u+n*m ,n,MPI_CUSTOM_REAL,domain.ry+1,1,MPI_COMM_WORLD,&rqSendDown); // send u[rowM-1] to rank+1
MPI_Irecv(*d_u+n*(m+R),n,MPI_CUSTOM_REAL,domain.ry+1,0,MPI_COMM_WORLD,&rqRecvUp ); // recv u[row M ] from rank+1
}
if (domain.ry > 0 ) {
MPI_Isend(*d_u+n ,n,MPI_CUSTOM_REAL,domain.ry-1,0,MPI_COMM_WORLD,&rqSendUp ); // send u[row 1 ] to rank-1
MPI_Irecv(*d_u ,n,MPI_CUSTOM_REAL,domain.ry-1,1,MPI_COMM_WORLD,&rqRecvDown); // recv u[row 0 ] from rank-1
}
// Wait for process to complete
if(domain.ry <SY-1) {
MPI_Wait(&rqSendDown, &status);
MPI_Wait(&rqRecvUp, &status);
}
if(domain.ry > 0 ) {
MPI_Wait(&rqRecvDown, &status);
MPI_Wait(&rqSendUp, &status);
}
}
if (phase==2) {
// Collect local domains from their associated GPU
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d) :::\n",phase);
Error=hipMemcpy(*t_u,*d_u,(domain.nx+0*R)*(domain.ny+2*R)*sizeof(real),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
}
}
__global__ void Laplace2d(const int ny, const int ry, const real * __restrict__ u, real * __restrict__ un){
int o, n, s, e, w;
// Threads id
const int i = threadIdx.x + blockIdx.x*blockDim.x;
const int j = threadIdx.y + blockIdx.y*blockDim.y;
o = i+(NX*j); // node( j,i ) n
n = o + NX; // node(j+1,i) |
s = o - NX; // node(j-1,i) w--o--e
e = o + 1; // node(j,i+1) |
w = o - 1; // node(j,i-1) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<ny-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
extern "C" void Call_Laplace(dmn domain, real **u, real **un){
// Produce one iteration of the laplace operator
int tx=32, ty=32; // number of threads in x and y directions
dim3 blockSize(tx,ty); dim3 numBlocks((domain.nx+tx-1)/tx,(domain.ny+ty-1)/ty);
hipLaunchKernelGGL(( Laplace2d), dim3(numBlocks),dim3(blockSize), 0, 0, domain.ny+2*R,domain.ry,*u,*un);
if (DEBUG) printf("CUDA error (Laplace2d) %s\n",hipGetErrorString(hipPeekAtLastError()));
hipError_t Error = hipDeviceSynchronize();
if (DEBUG) printf("CUDA error (Laplace2d Synchronize) %s\n",hipGetErrorString(Error));
}
| 5e9cb2dde7cd929b594d3a9e9e4f8c6888646c1c.cu |
#include "heat2d.h"
void Manage_Devices(){
// we set devices before MPI init to make sure MPI is aware of the cuda devices!
char * localRankStr = NULL;
int rank = 0, devCount = 0;
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK)) != NULL) { rank = atoi(localRankStr); }
cudaGetDeviceCount(&devCount);
//cudaSetDevice(rank/devCount);
cudaSetDevice(rank);
printf("number of devices found: %d\n",devCount);
}
dmn Manage_Domain(int rank, int npcs, int gpu){
// allocate sub-domain for a one-dimensional domain decomposition in the Y-direction
dmn domain;
domain.gpu = gpu;
domain.rank = rank;
domain.npcs = npcs;
domain.nx = NX/1;
domain.ny = NY/SY;
domain.size = domain.nx*domain.ny;
domain.rx = 0;
domain.ry = rank;
cudaSetDevice(domain.gpu);
// Have process 0 print out some information.
if (rank==ROOT) {
printf ("HEAT_MPI:\n\n" );
printf (" C++/MPI version\n" );
printf (" Solve the 2D time-dependent heat equation.\n\n" );
}
// Print welcome message
printf (" Commence Simulation: cpu rank %d out of %d cores with GPU(%d)"
" working with (%d +0) x (%d +%d) cells\n",rank,npcs,gpu,domain.nx,domain.ny,2*R);
// return the domain structure
return domain;
}
void Manage_Memory(int phase, dmn domain, real **h_u, real **t_u, real **d_u, real **d_un){
size_t global = NY*NX*sizeof(real);
size_t local = (domain.nx+0*R)*(domain.ny+2*R)*sizeof(real);
cudaError_t Error;
if (phase==0) {
// Allocate global domain on ROOT
if (domain.rank==ROOT) *h_u=(real*)malloc(global);
// Allocate local domains on MPI threads with 2 extra slots for halo regions
*t_u =(real*)malloc(local);
// Allocate local domains on devices with 2 extra slots for halo regions
Error = cudaSetDevice(domain.gpu); if (DEBUG) printf("CUDA error (cudaSetDevice) = %s\n",cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_u ,local); if (DEBUG) printf("CUDA error (cudaMalloc d_u) = %s\n",cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (cudaMalloc d_un) = %s\n",cudaGetErrorString(Error));
}
if (phase==1){
// Free local domain variable on device
//Error = cudaSetDevice(domain.gpu); if (DEBUG) printf("CUDA error (cudaSetDevice) = %s\n",cudaGetErrorString(Error));
Error = cudaFree(*d_u ); if (DEBUG) printf("CUDA error (cudaFree d_u) = %s\n",cudaGetErrorString(Error));
Error = cudaFree(*d_un); if (DEBUG) printf("CUDA error (cudaFree d_un) = %s\n",cudaGetErrorString(Error));
// Free the local domain on host
free(*t_u);
}
if (phase==2) {
// Free global domain on ROOT
if (domain.rank==ROOT) free(*h_u);
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
void Call_IC(const int IC, real * __restrict u0){
int i, j, o;
switch (IC) {
case 1: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
}
}
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX-1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX-1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY-1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY-1); // right
}
}
break;
}
case 3: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
}
}
break;
}
// here to add another IC
}
}
void Save_Results(real *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,u[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
void Print_SubDomain(dmn domain, real *u){
// print result to terminal
for (int j = 0; j < domain.ny+2*R; j++) {
for (int i = 0; i < domain.nx+2*0; i++) {
printf("%1.2f ",u[i+domain.nx*j]);
}
printf("\n");
}
printf("\n");
}
void Print_Domain(dmn domain, real *u){
// print result to terminal
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
printf("%1.2f ",u[i+NX*j]);
}
printf("\n");
}
printf("\n");
}
__global__ void Set_DirichletBC(const int m, const int rank, real * __restrict__ u){
// Threads id
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<NX) {
if (rank==0) { /* bottom BC */
float u_bl = 0.7f;
float u_br = 1.0f;
int j = 1;
u[i+NX*j] = u_bl + (u_br-u_bl)*i/(NX-1);
}
if (rank==SY-1) { /* top BC */
float u_tl = 0.7f;
float u_tr = 1.0f;
int j = m;
u[i+NX*j] = u_tl + (u_tr-u_tl)*i/(NX-1);
}
}
}
void Manage_Comms(int phase, dmn domain, real **t_u, real **d_u) {
cudaError_t Error;
if (phase==0) {
// Send local domains to their associated GPU
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d) :::\n",phase);
Error=cudaMemcpy(*d_u,*t_u,(domain.nx+0*R)*(domain.ny+2*R)*sizeof(real),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
}
if (phase==1) {
// Communicate halo regions
const int n = domain.nx;
const int m = domain.ny;
MPI_Status status;
MPI_Request rqSendUp, rqSendDown, rqRecvUp, rqRecvDown;
// Impose BCs!
int blockSize = 256, gridSize = 1+(n-1)/blockSize;
Set_DirichletBC<<<gridSize,blockSize>>>(m,domain.ry,*d_u);
// Communicate halo regions
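// Device pointers are handed directly to MPI_Isend/MPI_Irecv, which assumes a CUDA-aware MPI build.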
if (domain.ry <SY-1) {
MPI_Isend(*d_u+n*m ,n,MPI_CUSTOM_REAL,domain.ry+1,1,MPI_COMM_WORLD,&rqSendDown); // send u[rowM-1] to rank+1
MPI_Irecv(*d_u+n*(m+R),n,MPI_CUSTOM_REAL,domain.ry+1,0,MPI_COMM_WORLD,&rqRecvUp ); // recv u[row M ] from rank+1
}
if (domain.ry > 0 ) {
MPI_Isend(*d_u+n ,n,MPI_CUSTOM_REAL,domain.ry-1,0,MPI_COMM_WORLD,&rqSendUp ); // send u[row 1 ] to rank-1
MPI_Irecv(*d_u ,n,MPI_CUSTOM_REAL,domain.ry-1,1,MPI_COMM_WORLD,&rqRecvDown); // recv u[row 0 ] from rank-1
}
// Wait for process to complete
if(domain.ry <SY-1) {
MPI_Wait(&rqSendDown, &status);
MPI_Wait(&rqRecvUp, &status);
}
if(domain.ry > 0 ) {
MPI_Wait(&rqRecvDown, &status);
MPI_Wait(&rqSendUp, &status);
}
}
if (phase==2) {
// Collect local domains from their associated GPU
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d) :::\n",phase);
Error=cudaMemcpy(*t_u,*d_u,(domain.nx+0*R)*(domain.ny+2*R)*sizeof(real),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
}
}
__global__ void Laplace2d(const int ny, const int ry, const real * __restrict__ u, real * __restrict__ un){
int o, n, s, e, w;
// Threads id
const int i = threadIdx.x + blockIdx.x*blockDim.x;
const int j = threadIdx.y + blockIdx.y*blockDim.y;
o = i+(NX*j); // node( j,i ) n
n = o + NX; // node(j+1,i) |
s = o - NX; // node(j-1,i) w--o--e
e = o + 1; // node(j,i+1) |
w = o - 1; // node(j,i-1) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<ny-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
extern "C" void Call_Laplace(dmn domain, real **u, real **un){
// Produce one iteration of the laplace operator
int tx=32, ty=32; // number of threads in x and y directions
dim3 blockSize(tx,ty); dim3 numBlocks((domain.nx+tx-1)/tx,(domain.ny+ty-1)/ty);
Laplace2d<<<numBlocks,blockSize>>>(domain.ny+2*R,domain.ry,*u,*un);
if (DEBUG) printf("CUDA error (Laplace2d) %s\n",cudaGetErrorString(cudaPeekAtLastError()));
cudaError_t Error = cudaDeviceSynchronize();
if (DEBUG) printf("CUDA error (Laplace2d Synchronize) %s\n",cudaGetErrorString(Error));
}
|
9703b3ceab0d178ce17c2b51d0d3e2c01083b521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void histogram( int * hist_out, unsigned char * img_in, int img_w,int img_h, int nbr_bin){
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
unsigned int col= tx + blockDim.x * bx;
unsigned int row= ty + blockDim.y * by;
int grid_width = gridDim.x * blockDim.x;
int id = row * grid_width + col;
if(id<nbr_bin)
hist_out[id] = 0;
__syncthreads();
if(row<img_w && col<img_h)
atomicAdd( &(hist_out[img_in[id]]), 1);
} | 9703b3ceab0d178ce17c2b51d0d3e2c01083b521.cu | #include "includes.h"
__global__ void histogram( int * hist_out, unsigned char * img_in, int img_w,int img_h, int nbr_bin){
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
unsigned int col= tx + blockDim.x * bx;
unsigned int row= ty + blockDim.y * by;
int grid_width = gridDim.x * blockDim.x;
int id = row * grid_width + col;
if(id<nbr_bin)
hist_out[id] = 0;
__syncthreads();
if(col<img_w && row<img_h)
atomicAdd( &(hist_out[img_in[id]]), 1);
} |
219e1134e9a72f2b967eb1e3acd5fd6803bd55dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*SmoothSeq SmoothSeq.cpp `pkg-config --cflags --libs opencv`
./SmoothSeq image_in type_img image_out
type_img -> 0 = GRAYSCALE
type_img -> 1 = COLOR
*/
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <sys/time.h>
#include <math.h>
using namespace cv;
using namespace std;
//Function that computes the average of a 5x5 "matrix" around a given position
__global__ void smooth( unsigned char *entrada,unsigned char *saida, int n_linhas, int n_colunas ) {
//Compute the position in the vector (block_id * block_dim + thread_id)
float media;
int posicao = blockIdx.x * blockDim.x + threadIdx.x;
//If the position is not beyond the bounds of the original image...
if(posicao < (n_linhas)*(n_colunas)) {
//sum the values of the 5x5 region around the pixel, accumulating in a float to avoid unsigned char overflow
media = entrada[posicao]+
entrada[posicao+(n_colunas+4)]+
entrada[posicao+(2*(n_colunas+4))]+
entrada[posicao+(3*(n_colunas+4))]+
entrada[posicao+(4*(n_colunas+4))]+
entrada[posicao+1]+
entrada[posicao+(n_colunas+4)+1]+
entrada[posicao+(2*(n_colunas+4))+1]+
entrada[posicao+(3*(n_colunas+4))+1]+
entrada[posicao+(4*(n_colunas+4))+1]+
entrada[posicao+2]+
entrada[posicao+(n_colunas+4)+2]+
entrada[posicao+(2*(n_colunas+4))+2]+
entrada[posicao+(3*(n_colunas+4))+2]+
entrada[posicao+(4*(n_colunas+4))+2]+
entrada[posicao+3]+
entrada[posicao+(n_colunas+4)+3]+
entrada[posicao+(2*(n_colunas+4))+3]+
entrada[posicao+(3*(n_colunas+4))+3]+
entrada[posicao+(4*(n_colunas+4))+3]+
entrada[posicao+4]+
entrada[posicao+(n_colunas+4)+4]+
entrada[posicao+(2*(n_colunas+4))+4]+
entrada[posicao+(3*(n_colunas+4))+4]+
entrada[posicao+(4*(n_colunas+4))+4];
//compute the average and write it back
saida[posicao] = (unsigned char)(media / 25.0f);
}
}
int main(int argc, char *argv[]) {
//indicates whether the image is grayscale or color
int tipo_img = atoi(argv[2]);
//input and output file names
const char *fileIn, *fileOut;
//maximum number of threads per block on the andromeda machine's GPU
int nthreads = 1024;
int numBlocks;
//matrix holding the input image
Mat in;
//matrix that will receive the output image
Mat out;
//read the image file names
fileIn = argv[1];
fileOut = argv[3];
//read the image and store it in the matrix
if(tipo_img == 0) {
in = imread(fileIn, CV_LOAD_IMAGE_GRAYSCALE);
} else if(tipo_img == 1) {
in = imread(fileIn, CV_LOAD_IMAGE_COLOR);
} else {
cout << "Tipo de imagem nao suportado" << endl;
return -1;
}
//in case the image could not be opened
if (in.empty()) {
cout << "Nao foi possivel abrir a imagem: " << endl;
return -1;
}
int l_height = in.size().height, l_width = in.size().width;
//the number of blocks is the total number of pixels divided by the number of threads (rounded up)
numBlocks = (l_height*l_width + nthreads - 1) / nthreads;
unsigned char *original,*saida;
//CUDA-specific allocation for the input and output buffers
//These buffers are passed to the kernels that will be executed by the
//graphics card
hipMalloc(&original, (l_width + 4) * (l_height + 4));
hipMalloc(&saida, l_width * l_height);
//record the start time
struct timeval inicio, fim;
gettimeofday(&inicio,0);
hipMemcpy(original, in.data,l_width * l_height, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( smooth), dim3(numBlocks),dim3(nthreads), 0, 0, original, saida, l_height, l_width);
out = Mat::zeros(in.size(), in.type());
hipMemcpy(out.data, saida, l_width*l_height,hipMemcpyDeviceToHost);
//record the end time, compute the elapsed time and print it
gettimeofday(&fim,0);
float speedup = (fim.tv_sec + fim.tv_usec/1000000.0) - (inicio.tv_sec + inicio.tv_usec/1000000.0);
cout << speedup << endl;
imwrite(fileOut, out);
in.release();
out.release();
hipFree(original);
hipFree(saida);
return 0;
}
| 219e1134e9a72f2b967eb1e3acd5fd6803bd55dc.cu | /*SmoothSeq SmoothSeq.cpp `pkg-config --cflags --libs opencv`
./SmoothSeq image_in type_img image_out
type_img -> 0 = GRAYSCALE
type_img -> 1 = COLOR
*/
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <sys/time.h>
#include <math.h>
using namespace cv;
using namespace std;
//Function that computes the average of a 5x5 "matrix" around a given position
__global__ void smooth( unsigned char *entrada,unsigned char *saida, int n_linhas, int n_colunas ) {
//Compute the position in the vector (block_id * block_dim + thread_id)
float media;
int posicao = blockIdx.x * blockDim.x + threadIdx.x;
//If the position is not beyond the bounds of the original image...
if(posicao < (n_linhas)*(n_colunas)) {
//sum the values of the 5x5 region around the pixel, accumulating in a float to avoid unsigned char overflow
media = entrada[posicao]+
entrada[posicao+(n_colunas+4)]+
entrada[posicao+(2*(n_colunas+4))]+
entrada[posicao+(3*(n_colunas+4))]+
entrada[posicao+(4*(n_colunas+4))]+
entrada[posicao+1]+
entrada[posicao+(n_colunas+4)+1]+
entrada[posicao+(2*(n_colunas+4))+1]+
entrada[posicao+(3*(n_colunas+4))+1]+
entrada[posicao+(4*(n_colunas+4))+1]+
entrada[posicao+2]+
entrada[posicao+(n_colunas+4)+2]+
entrada[posicao+(2*(n_colunas+4))+2]+
entrada[posicao+(3*(n_colunas+4))+2]+
entrada[posicao+(4*(n_colunas+4))+2]+
entrada[posicao+3]+
entrada[posicao+(n_colunas+4)+3]+
entrada[posicao+(2*(n_colunas+4))+3]+
entrada[posicao+(3*(n_colunas+4))+3]+
entrada[posicao+(4*(n_colunas+4))+3]+
entrada[posicao+4]+
entrada[posicao+(n_colunas+4)+4]+
entrada[posicao+(2*(n_colunas+4))+4]+
entrada[posicao+(3*(n_colunas+4))+4]+
entrada[posicao+(4*(n_colunas+4))+4];
//compute the average and write it back
saida[posicao] = (unsigned char)(media / 25.0f);
}
}
int main(int argc, char *argv[]) {
//indicates whether the image is grayscale or color
int tipo_img = atoi(argv[2]);
//input and output file names
const char *fileIn, *fileOut;
//maximum number of threads per block on the andromeda machine's GPU
int nthreads = 1024;
int numBlocks;
//matrix holding the input image
Mat in;
//matrix that will receive the output image
Mat out;
//read the image file names
fileIn = argv[1];
fileOut = argv[3];
//read the image and store it in the matrix
if(tipo_img == 0) {
in = imread(fileIn, CV_LOAD_IMAGE_GRAYSCALE);
} else if(tipo_img == 1) {
in = imread(fileIn, CV_LOAD_IMAGE_COLOR);
} else {
cout << "Tipo de imagem nao suportado" << endl;
return -1;
}
//in case the image could not be opened
if (in.empty()) {
cout << "Nao foi possivel abrir a imagem: " << endl;
return -1;
}
int l_height = in.size().height, l_width = in.size().width;
//the number of blocks is the total number of pixels divided by the number of threads (rounded up)
numBlocks = (l_height*l_width + nthreads - 1) / nthreads;
unsigned char *original,*saida;
//CUDA-specific allocation for the input and output buffers
//These buffers are passed to the kernels that will be executed by the
//graphics card
cudaMalloc(&original, (l_width + 4) * (l_height + 4));
cudaMalloc(&saida, l_width * l_height);
//record the start time
struct timeval inicio, fim;
gettimeofday(&inicio,0);
cudaMemcpy(original, in.data,l_width * l_height, cudaMemcpyHostToDevice);
smooth<<<numBlocks,nthreads>>>(original, saida, l_height, l_width);
out = Mat::zeros(in.size(), in.type());
cudaMemcpy(out.data, saida, l_width*l_height,cudaMemcpyDeviceToHost);
//record the end time, compute the elapsed time and print it
gettimeofday(&fim,0);
float speedup = (fim.tv_sec + fim.tv_usec/1000000.0) - (inicio.tv_sec + inicio.tv_usec/1000000.0);
cout << speedup << endl;
imwrite(fileOut, out);
in.release();
out.release();
cudaFree(original);
cudaFree(saida);
return 0;
}
|
cc986835efcd13874678ea5c71b5626c4e7a889f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
static const int THREADS = 28;
static const int SLICES = 1;
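// Each thread converts a horizontal band of rows/THREADS rows to greyscale using the
// 0.3/0.59/0.11 channel weights; any leftover rows (rows % THREADS) are not processed.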
__global__ void grey_scale(char* imageData, int rows, int columns, int channels) {
int thread_id = threadIdx.x;
int from = rows/THREADS * thread_id;
int to = rows/THREADS * (thread_id + 1);
for (int x = from; x < to; x++) {
for (int y = 0; y < columns; y++) {
auto rgb = imageData[x * columns * channels + y * channels] * 0.3f;
rgb += imageData[x * columns * channels + y * channels + 1] * 0.59f;
rgb += imageData[x * columns * channels + y * channels + 2] * 0.11f;
imageData[x * columns * channels + y * channels] = (char)(rgb);
imageData[x * columns * channels + y * channels + 1] = (char)(imageData[x * columns * channels + y * channels]);
imageData[x * columns * channels + y * channels + 2] = (char)(imageData[x * columns * channels + y * channels]);
}
}
}
void image_cuda(char *imageData, size_t size, int rows, int cols, int channels) {
char *dev_image;
hipMalloc((void **)&dev_image, size);
hipMemcpy(dev_image, imageData, size, hipMemcpyHostToDevice);
grey_scale << < SLICES, THREADS >> > (dev_image, rows, cols, channels);
hipMemcpy(imageData, dev_image, size, hipMemcpyDeviceToHost);
hipFree(dev_image);
} | cc986835efcd13874678ea5c71b5626c4e7a889f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
static const int THREADS = 28;
static const int SLICES = 1;
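// Each thread converts a horizontal band of rows/THREADS rows to greyscale using the
// 0.3/0.59/0.11 channel weights; any leftover rows (rows % THREADS) are not processed.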
__global__ void grey_scale(char* imageData, int rows, int columns, int channels) {
int thread_id = threadIdx.x;
int from = rows/THREADS * thread_id;
int to = rows/THREADS * (thread_id + 1);
for (int x = from; x < to; x++) {
for (int y = 0; y < columns; y++) {
auto rgb = imageData[x * columns * channels + y * channels] * 0.3f;
rgb += imageData[x * columns * channels + y * channels + 1] * 0.59f;
rgb += imageData[x * columns * channels + y * channels + 2] * 0.11f;
imageData[x * columns * channels + y * channels] = (char)(rgb);
imageData[x * columns * channels + y * channels + 1] = (char)(imageData[x * columns * channels + y * channels]);
imageData[x * columns * channels + y * channels + 2] = (char)(imageData[x * columns * channels + y * channels]);
}
}
}
void image_cuda(char *imageData, size_t size, int rows, int cols, int channels) {
char *dev_image;
cudaMalloc((void **)&dev_image, size);
cudaMemcpy(dev_image, imageData, size, cudaMemcpyHostToDevice);
grey_scale << < SLICES, THREADS >> > (dev_image, rows, cols, channels);
cudaMemcpy(imageData, dev_image, size, cudaMemcpyDeviceToHost);
cudaFree(dev_image);
} |
e817728f23cd063f1224ab5673014b86fd4c391e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaLK.h"
#include <stdio.h>
const float scaling[] = {1, 0.5f, 0.25f, 0.125f, 0.0625f, 0.03125f, 0.015625f, 0.0078125f};
// Can't use an array of texture<> !! so we'll just re-use the one texture buffer for each image
texture<float, 2, hipReadModeElementType> texRef_pyramid_prev;
texture<float, 2, hipReadModeElementType> texRef_pyramid_cur;
__global__ void convertToGrey(unsigned char *d_in, float *d_out, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < N)
d_out[idx] = d_in[idx*3]*0.1144f + d_in[idx*3+1]*0.5867f + d_in[idx*3+2]*0.2989f;
}
__global__ void pyrDownsample(float *in, int w1, int h1, float *out, int w2, int h2)
{
// Input has to be greyscale
int x2 = blockIdx.x*blockDim.x + threadIdx.x;
int y2 = blockIdx.y*blockDim.y + threadIdx.y;
if( (x2 < w2) && (y2 < h2) ) {
int x = x2*2;
int y = y2*2;
int x_1 = x-1;
int y_1 = y-1;
int x_2 = x+1;
int y_2 = y+1;
if(x_1 < 0) x_1 = 0;
if(y_1 < 0) y_1 = 0;
if(x_2 >= w1) x_2 = w1 - 1;
if(y_2 >= h1) y_2 = h1 - 1;
out[y2*w2 + x2] = 0.25f*in[y*w1+x] + 0.125f*(in[y*w1+x_1] + in[y*w1+x_2] + in[y_1*w1+x] + in[y_2*w1+x]) +
0.0625f*(in[y_1*w1+x_1] + in[y_2*w1+x_1] + in[y_1*w1+x_2] + in[y_2*w1+x_2]);
}
}
__global__ void smoothX(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx = y*w;
int a = x-2;
int b = x-1;
int c = x;
int d = x+1;
int e = x+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(c >= w) c = w-1;
if(d >= w) d = w-1;
if(e >= w) e = w-1;
out[y*w+x] = 0.0625f*in[idx+a] + 0.25f*in[idx+b] + 0.375f*in[idx+c] + 0.25f*in[idx+d] + 0.0625f*in[idx+e];
}
__global__ void smoothY(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int a = y-2;
int b = y-1;
int c = y;
int d = y+1;
int e = y+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(c >= h) c = h-1;
if(d >= h) d = h-1;
if(e >= h) e = h-1;
out[y*w+x] = 0.0625f*in[a*w+x] + 0.25f*in[b*w+x] + 0.375f*in[c*w+x] + 0.25f*in[d*w+x] + 0.0625f*in[e*w+x];
}
// Call recursively
// w/h - original dimension of image
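// Pyramidal Lucas-Kanade tracking: for each pixel the 2x2 spatial-gradient matrix is accumulated
// over a (2*PATCH_R+1)^2 window, the flow increment is solved iteratively, and the displacement
// found at a coarser level is doubled and reused as the initial guess at the next finer level.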
__global__ void track(const int w, const int h,
const int pyr_w, const int pyr_h,
float scaling, int level, char initGuess,
float *dx, float *dy, char *status)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*w + x;
if(x > w-1 || y > h-1)
return;
if(status[idx] == 0)
return;
float prev_x = x*scaling;
float prev_y = y*scaling;
float Vx, Vy;
float cur_x, cur_y;
float sum_Ixx = 0;
float sum_Ixy = 0;
float sum_Iyy = 0;
float sum_Ixt;
float sum_Iyt;
float Ix, Iy, It;
int xx, yy;
float det, D;
float I, J;
float vx, vy;
int j;
if(initGuess) {
Vx = 0;
Vy = 0;
cur_x = prev_x;
cur_y = prev_y;
}
else {
Vx = dx[idx];
Vy = dy[idx];
cur_x = prev_x + Vx;
cur_y = prev_y + Vy;
}
// Calculate spatial gradient
for(yy=-PATCH_R; yy <= PATCH_R; yy++) {
for(xx=-PATCH_R; xx <= PATCH_R; xx++) {
Ix = (tex2D(texRef_pyramid_prev, prev_x + xx+1, prev_y + yy) - tex2D(texRef_pyramid_prev, prev_x + xx-1, prev_y + yy))*0.5f;
Iy = (tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy+1) - tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy-1))*0.5f;
sum_Ixx += Ix*Ix;
sum_Ixy += Ix*Iy;
sum_Iyy += Iy*Iy;
}
}
det = sum_Ixx*sum_Iyy - sum_Ixy*sum_Ixy;
if(det < 0.00001f) {
status[idx] = 0;
return;
}
D = 1/det;
// Iteration part
for(j=0; j < 10; j++) {
if(cur_x < 0 || cur_x > pyr_w || cur_y < 0 || cur_y > pyr_h) {
status[idx] = 0;
return;
}
sum_Ixt = 0;
sum_Iyt = 0;
// No explicit handling of pixels outside the image ... maybe we don't have to because the hardware interpolation scheme
// will always give a result for pixels outside the image. How greatly the duplicated pixel values affect the result is unknown at the moment.
for(yy=-PATCH_R; yy <= PATCH_R; yy++) {
for(xx=-PATCH_R; xx <= PATCH_R; xx++) {
I = tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy);
J = tex2D(texRef_pyramid_cur, cur_x + xx, cur_y + yy);
Ix = (tex2D(texRef_pyramid_prev, prev_x + xx+1, prev_y + yy) - tex2D(texRef_pyramid_prev, prev_x + xx-1, prev_y + yy))*0.5f;
Iy = (tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy+1) - tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy-1))*0.5f;
It = J - I;
sum_Ixt += Ix*It;
sum_Iyt += Iy*It;
}
}
// Find the inverse of the 2x2 matrix using a mix of determinant and adjugate matrix
// http://cnx.org/content/m19446/latest/
vx = D*(-sum_Iyy*sum_Ixt + sum_Ixy*sum_Iyt);
vy = D*( sum_Ixy*sum_Ixt - sum_Ixx*sum_Iyt);
Vx += vx;
Vy += vy;
cur_x += vx;
cur_y += vy;
// Movement very small
if(fabsf(vx) < 0.01f && fabsf(vy) < 0.01f)
break;
}
if(level != 0) {
cur_x += cur_x;
cur_y += cur_y;
Vx += Vx;
Vy += Vy;
}
dx[idx] = Vx;
dy[idx] = Vy;
}
cudaLK::cudaLK()
{
}
cudaLK::~cudaLK()
{
for(int i=0; i < LEVELS; i++) {
hipFree(gpu_img_pyramid_prev[i]);
hipFree(gpu_img_pyramid_cur[i]);
}
hipFree(gpu_smoothed_prev_x);
hipFree(gpu_smoothed_cur_x);
hipFree(gpu_smoothed_prev);
hipFree(gpu_smoothed_cur);
hipFreeArray(gpu_array_pyramid_prev);
hipFreeArray(gpu_array_pyramid_prev_Ix);
hipFreeArray(gpu_array_pyramid_prev_Iy);
hipFreeArray(gpu_array_pyramid_cur);
hipFree(gpu_dx);
hipFree(gpu_dy);
hipFree(gpu_status);
delete [] dx;
delete [] dy;
delete [] status;
}
void cudaLK::checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void cudaLK::initMem()
{
hipMalloc((void**)&gpu_img_prev_RGB, sizeof(char)*w*h*3);
hipMalloc((void**)&gpu_img_cur_RGB, sizeof(char)*w*h*3);
hipMalloc((void**)&gpu_img_pyramid_prev[0], sizeof(float)*w*h);
hipMalloc((void**)&gpu_img_pyramid_cur[0], sizeof(float)*w*h);
hipMalloc((void**)&gpu_smoothed_prev_x, sizeof(float)*w*h);
hipMalloc((void**)&gpu_smoothed_cur_x, sizeof(float)*w*h);
hipMalloc((void**)&gpu_smoothed_prev, sizeof(float)*w*h);
hipMalloc((void**)&gpu_smoothed_cur, sizeof(float)*w*h);
// Texture
hipMallocArray(&gpu_array_pyramid_prev, &texRef_pyramid_prev.channelDesc, w, h);
hipMallocArray(&gpu_array_pyramid_cur, &texRef_pyramid_cur.channelDesc, w, h);
hipBindTextureToArray(texRef_pyramid_prev, gpu_array_pyramid_prev);
hipBindTextureToArray(texRef_pyramid_cur, gpu_array_pyramid_cur);
texRef_pyramid_prev.normalized = 0;
texRef_pyramid_prev.filterMode = hipFilterModeLinear;
texRef_pyramid_prev.addressMode[0] = hipAddressModeClamp;
texRef_pyramid_prev.addressMode[1] = hipAddressModeClamp;
texRef_pyramid_cur.normalized = 0;
texRef_pyramid_cur.filterMode = hipFilterModeLinear;
texRef_pyramid_cur.addressMode[0] = hipAddressModeClamp;
texRef_pyramid_cur.addressMode[1] = hipAddressModeClamp;
hipMalloc((void**)&gpu_dx, sizeof(float)*w*h);
hipMalloc((void**)&gpu_dy, sizeof(float)*w*h);
hipMalloc((void**)&gpu_status, sizeof(char)*w*h);
int _w = w;
int _h = h;
dx = new float[w*h];
dy = new float[w*h];
status = new char[w*h];
pyr_w[0] = w;
pyr_h[0] = h;
for(int i=1; i < LEVELS; i++) {
_w /= 2;
_h /= 2;
pyr_w[i] = _w;
pyr_h[i] = _h;
hipMalloc((void**)&gpu_img_pyramid_prev[i], sizeof(float)*_w*_h);
hipMalloc((void**)&gpu_img_pyramid_cur[i], sizeof(float)*_w*_h);
}
}
void cudaLK::run(unsigned char *prev, unsigned char *cur, int _w, int _h)
{
w = _w;
h = _h;
initMem();
int nThreadsX = NTHREAD_X;
int nThreadsY = NTHREAD_Y;
int blocksW = w/nThreadsX + ((w % nThreadsX)?1:0);
int blocksH = h/nThreadsY + ((h % nThreadsY )?1:0);
dim3 blocks(blocksW, blocksH);
dim3 threads(nThreadsX, nThreadsY);
int blocks1D = (w*h)/256 + (w*h % 256?1:0); // for greyscale
int start = getTimeNow();
int s;
// Copy image to GPU
s = getTimeNow();
hipMemcpy(gpu_img_prev_RGB, prev, w*h*3, hipMemcpyHostToDevice);
hipMemcpy(gpu_img_cur_RGB, cur, w*h*3, hipMemcpyHostToDevice);
checkCUDAError("start");
printf("Copying 2 images from CPU to GPU: %d ms\n", getTimeNow() - s);
// RGB -> grey
s = getTimeNow();
hipLaunchKernelGGL(( convertToGrey), dim3(blocks1D), dim3(256), 0, 0, gpu_img_prev_RGB, gpu_img_pyramid_prev[0], w*h);
hipLaunchKernelGGL(( convertToGrey), dim3(blocks1D), dim3(256), 0, 0, gpu_img_cur_RGB, gpu_img_pyramid_cur[0], w*h);
hipDeviceSynchronize();
checkCUDAError("convertToGrey");
printf("Converting from RGB to greyscale: %d ms\n", getTimeNow() - s);
s = getTimeNow();
for(int i=0; i < LEVELS-1; i++) {
hipLaunchKernelGGL(( smoothX), dim3(blocks), dim3(threads), 0, 0, gpu_img_pyramid_prev[i], pyr_w[i], pyr_h[i], gpu_smoothed_prev_x);
hipLaunchKernelGGL(( smoothX), dim3(blocks), dim3(threads), 0, 0, gpu_img_pyramid_cur[i], pyr_w[i], pyr_h[i], gpu_smoothed_cur_x);
hipDeviceSynchronize();
hipLaunchKernelGGL(( smoothY), dim3(blocks), dim3(threads), 0, 0, gpu_smoothed_prev_x, pyr_w[i], pyr_h[i], gpu_smoothed_prev);
hipLaunchKernelGGL(( smoothY), dim3(blocks), dim3(threads), 0, 0, gpu_smoothed_cur_x, pyr_w[i], pyr_h[i], gpu_smoothed_cur);
hipDeviceSynchronize();
hipLaunchKernelGGL(( pyrDownsample), dim3(blocks), dim3(threads), 0, 0, gpu_smoothed_prev, pyr_w[i], pyr_h[i], gpu_img_pyramid_prev[i+1], pyr_w[i+1], pyr_h[i+1]);
hipLaunchKernelGGL(( pyrDownsample), dim3(blocks), dim3(threads), 0, 0, gpu_smoothed_cur, pyr_w[i], pyr_h[i], gpu_img_pyramid_cur[i+1], pyr_w[i+1], pyr_h[i+1]);
hipDeviceSynchronize();
checkCUDAError("pyrDownsample here");
}
printf("Generating the pyramids: %d ms\n", getTimeNow() - s);
s = getTimeNow();
hipMemset(gpu_status, 1, sizeof(char)*w*h);
// Do the actual tracking
for(int l=LEVELS-1; l >= 0; l--) {
hipMemcpy2DToArray(gpu_array_pyramid_prev, 0, 0, gpu_img_pyramid_prev[l],
sizeof(float)*pyr_w[l], sizeof(float)*pyr_w[l], pyr_h[l], hipMemcpyDeviceToDevice);
hipMemcpy2DToArray(gpu_array_pyramid_cur, 0, 0, gpu_img_pyramid_cur[l],
sizeof(float)*pyr_w[l], sizeof(float)*pyr_w[l], pyr_h[l], hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( track), dim3(blocks), dim3(threads), 0, 0, w, h, pyr_w[l], pyr_h[l], scaling[l], l, (l == LEVELS-1), gpu_dx, gpu_dy, gpu_status);
hipDeviceSynchronize();
}
printf("Tracking: %d ms\n", getTimeNow() - s);
// Copy back results
s = getTimeNow();
hipMemcpy(dx, gpu_dx, sizeof(float)*w*h, hipMemcpyDeviceToHost);
hipMemcpy(dy, gpu_dy, sizeof(float)*w*h, hipMemcpyDeviceToHost);
hipMemcpy(status, gpu_status, sizeof(char)*w*h, hipMemcpyDeviceToHost);
printf("Copying results from GPU to CPU: %d ms\n", getTimeNow() - s);
printf("Total time for cudaLK: %d ms\n", getTimeNow() - start);
}
| e817728f23cd063f1224ab5673014b86fd4c391e.cu | #include "cudaLK.h"
#include <stdio.h>
const float scaling[] = {1, 0.5f, 0.25f, 0.125f, 0.0625f, 0.03125f, 0.015625f, 0.0078125f};
// Can't use an array of texture<> !! so we'll just re-use the one texture buffer for each image
texture<float, 2, cudaReadModeElementType> texRef_pyramid_prev;
texture<float, 2, cudaReadModeElementType> texRef_pyramid_cur;
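// Convert an interleaved 3-channel 8-bit image to single-channel float greyscale.
// The 0.1144/0.5867/0.2989 weights are the BT.601 luma coefficients applied in B,G,R order,
// i.e. the input is assumed to be stored as BGR (OpenCV convention).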
__global__ void convertToGrey(unsigned char *d_in, float *d_out, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < N)
d_out[idx] = d_in[idx*3]*0.1144f + d_in[idx*3+1]*0.5867f + d_in[idx*3+2]*0.2989f;
}
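// Halve the resolution of a greyscale image: each output pixel is a weighted average of the
// 3x3 neighbourhood around the corresponding input pixel (1/4 centre, 1/8 edges, 1/16 corners),
// with reads clamped at the image border.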
__global__ void pyrDownsample(float *in, int w1, int h1, float *out, int w2, int h2)
{
// Input has to be greyscale
int x2 = blockIdx.x*blockDim.x + threadIdx.x;
int y2 = blockIdx.y*blockDim.y + threadIdx.y;
if( (x2 < w2) && (y2 < h2) ) {
int x = x2*2;
int y = y2*2;
int x_1 = x-1;
int y_1 = y-1;
int x_2 = x+1;
int y_2 = y+1;
if(x_1 < 0) x_1 = 0;
if(y_1 < 0) y_1 = 0;
if(x_2 >= w1) x_2 = w1 - 1;
if(y_2 >= h1) y_2 = h1 - 1;
out[y2*w2 + x2] = 0.25f*in[y*w1+x] + 0.125f*(in[y*w1+x_1] + in[y*w1+x_2] + in[y_1*w1+x] + in[y_2*w1+x]) +
0.0625f*(in[y_1*w1+x_1] + in[y_2*w1+x_1] + in[y_1*w1+x_2] + in[y_2*w1+x_2]);
}
}
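// smoothX/smoothY form a separable 5-tap binomial blur, (1 4 6 4 1)/16, applied before each
// downsampling step; this kernel is the horizontal pass and smoothY below is the vertical pass.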
__global__ void smoothX(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx = y*w;
int a = x-2;
int b = x-1;
int c = x;
int d = x+1;
int e = x+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(c >= w) c = w-1;
if(d >= w) d = w-1;
if(e >= w) e = w-1;
out[y*w+x] = 0.0625f*in[idx+a] + 0.25f*in[idx+b] + 0.375f*in[idx+c] + 0.25f*in[idx+d] + 0.0625f*in[idx+e];
}
__global__ void smoothY(float *in, int w, int h, float *out)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x >= w || y >= h)
return;
int a = y-2;
int b = y-1;
int c = y;
int d = y+1;
int e = y+2;
if(a < 0) a = 0;
if(b < 0) b = 0;
if(c >= h) c = h-1;
if(d >= h) d = h-1;
if(e >= h) e = h-1;
out[y*w+x] = 0.0625f*in[a*w+x] + 0.25f*in[b*w+x] + 0.375f*in[c*w+x] + 0.25f*in[d*w+x] + 0.0625f*in[e*w+x];
}
// Called from the host once per pyramid level, from the coarsest level down to the original
// resolution; each call refines the flow estimate produced at the previous (coarser) level.
// w/h - original dimension of image
__global__ void track(const int w, const int h,
const int pyr_w, const int pyr_h,
float scaling, int level, char initGuess,
float *dx, float *dy, char *status)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*w + x;
if(x > w-1 || y > h-1)
return;
if(status[idx] == 0)
return;
float prev_x = x*scaling;
float prev_y = y*scaling;
float Vx, Vy;
float cur_x, cur_y;
float sum_Ixx = 0;
float sum_Ixy = 0;
float sum_Iyy = 0;
float sum_Ixt;
float sum_Iyt;
float Ix, Iy, It;
int xx, yy;
float det, D;
float I, J;
float vx, vy;
int j;
if(initGuess) {
Vx = 0;
Vy = 0;
cur_x = prev_x;
cur_y = prev_y;
}
else {
Vx = dx[idx];
Vy = dy[idx];
cur_x = prev_x + Vx;
cur_y = prev_y + Vy;
}
// Calculate spatial gradient
for(yy=-PATCH_R; yy <= PATCH_R; yy++) {
for(xx=-PATCH_R; xx <= PATCH_R; xx++) {
Ix = (tex2D(texRef_pyramid_prev, prev_x + xx+1, prev_y + yy) - tex2D(texRef_pyramid_prev, prev_x + xx-1, prev_y + yy))*0.5f;
Iy = (tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy+1) - tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy-1))*0.5f;
sum_Ixx += Ix*Ix;
sum_Ixy += Ix*Iy;
sum_Iyy += Iy*Iy;
}
}
det = sum_Ixx*sum_Iyy - sum_Ixy*sum_Ixy;
if(det < 0.00001f) {
status[idx] = 0;
return;
}
D = 1/det;
// Iteration part
for(j=0; j < 10; j++) {
if(cur_x < 0 || cur_x > pyr_w || cur_y < 0 || cur_y > pyr_h) {
status[idx] = 0;
return;
}
sum_Ixt = 0;
sum_Iyt = 0;
// No explicit handling of pixels outside the image ... maybe we don't have to because the hardware interpolation scheme
// will always give a result for pixels outside the image. How greatly the duplicated pixel values affect the result is unknown at the moment.
for(yy=-PATCH_R; yy <= PATCH_R; yy++) {
for(xx=-PATCH_R; xx <= PATCH_R; xx++) {
I = tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy);
J = tex2D(texRef_pyramid_cur, cur_x + xx, cur_y + yy);
Ix = (tex2D(texRef_pyramid_prev, prev_x + xx+1, prev_y + yy) - tex2D(texRef_pyramid_prev, prev_x + xx-1, prev_y + yy))*0.5f;
Iy = (tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy+1) - tex2D(texRef_pyramid_prev, prev_x + xx, prev_y + yy-1))*0.5f;
It = J - I;
sum_Ixt += Ix*It;
sum_Iyt += Iy*It;
}
}
// Find the inverse of the 2x2 matrix using a mix of determinant and adjugate matrix
// http://cnx.org/content/m19446/latest/
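// This solves the 2x2 normal equations G*v = b for the incremental flow, where
//   G = [sum_Ixx  sum_Ixy; sum_Ixy  sum_Iyy]   (spatial gradient matrix)
//   b = -[sum_Ixt; sum_Iyt]                    (image mismatch vector)
// so v = (1/det(G)) * adj(G) * b, written out component-wise below.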
vx = D*(-sum_Iyy*sum_Ixt + sum_Ixy*sum_Iyt);
vy = D*( sum_Ixy*sum_Ixt - sum_Ixx*sum_Iyt);
Vx += vx;
Vy += vy;
cur_x += vx;
cur_y += vy;
// Movement very small
if(fabsf(vx) < 0.01f && fabsf(vy) < 0.01f)
break;
}
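// When moving on to the next (finer) pyramid level the image is twice as large, so both the
// current position estimate and the accumulated flow are doubled.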
if(level != 0) {
cur_x += cur_x;
cur_y += cur_y;
Vx += Vx;
Vy += Vy;
}
dx[idx] = Vx;
dy[idx] = Vy;
}
cudaLK::cudaLK()
{
}
cudaLK::~cudaLK()
{
for(int i=0; i < LEVELS; i++) {
cudaFree(gpu_img_pyramid_prev[i]);
cudaFree(gpu_img_pyramid_cur[i]);
}
cudaFree(gpu_smoothed_prev_x);
cudaFree(gpu_smoothed_cur_x);
cudaFree(gpu_smoothed_prev);
cudaFree(gpu_smoothed_cur);
cudaFreeArray(gpu_array_pyramid_prev);
cudaFreeArray(gpu_array_pyramid_prev_Ix);
cudaFreeArray(gpu_array_pyramid_prev_Iy);
cudaFreeArray(gpu_array_pyramid_cur);
cudaFree(gpu_dx);
cudaFree(gpu_dy);
cudaFree(gpu_status);
delete [] dx;
delete [] dy;
delete [] status;
}
void cudaLK::checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void cudaLK::initMem()
{
cudaMalloc((void**)&gpu_img_prev_RGB, sizeof(char)*w*h*3);
cudaMalloc((void**)&gpu_img_cur_RGB, sizeof(char)*w*h*3);
cudaMalloc((void**)&gpu_img_pyramid_prev[0], sizeof(float)*w*h);
cudaMalloc((void**)&gpu_img_pyramid_cur[0], sizeof(float)*w*h);
cudaMalloc((void**)&gpu_smoothed_prev_x, sizeof(float)*w*h);
cudaMalloc((void**)&gpu_smoothed_cur_x, sizeof(float)*w*h);
cudaMalloc((void**)&gpu_smoothed_prev, sizeof(float)*w*h);
cudaMalloc((void**)&gpu_smoothed_cur, sizeof(float)*w*h);
// Texture
cudaMallocArray(&gpu_array_pyramid_prev, &texRef_pyramid_prev.channelDesc, w, h);
cudaMallocArray(&gpu_array_pyramid_cur, &texRef_pyramid_cur.channelDesc, w, h);
cudaBindTextureToArray(texRef_pyramid_prev, gpu_array_pyramid_prev);
cudaBindTextureToArray(texRef_pyramid_cur, gpu_array_pyramid_cur);
texRef_pyramid_prev.normalized = 0;
texRef_pyramid_prev.filterMode = cudaFilterModeLinear;
texRef_pyramid_prev.addressMode[0] = cudaAddressModeClamp;
texRef_pyramid_prev.addressMode[1] = cudaAddressModeClamp;
texRef_pyramid_cur.normalized = 0;
texRef_pyramid_cur.filterMode = cudaFilterModeLinear;
texRef_pyramid_cur.addressMode[0] = cudaAddressModeClamp;
texRef_pyramid_cur.addressMode[1] = cudaAddressModeClamp;
cudaMalloc((void**)&gpu_dx, sizeof(float)*w*h);
cudaMalloc((void**)&gpu_dy, sizeof(float)*w*h);
cudaMalloc((void**)&gpu_status, sizeof(char)*w*h);
int _w = w;
int _h = h;
dx = new float[w*h];
dy = new float[w*h];
status = new char[w*h];
pyr_w[0] = w;
pyr_h[0] = h;
for(int i=1; i < LEVELS; i++) {
_w /= 2;
_h /= 2;
pyr_w[i] = _w;
pyr_h[i] = _h;
cudaMalloc((void**)&gpu_img_pyramid_prev[i], sizeof(float)*_w*_h);
cudaMalloc((void**)&gpu_img_pyramid_cur[i], sizeof(float)*_w*_h);
}
}
void cudaLK::run(unsigned char *prev, unsigned char *cur, int _w, int _h)
{
w = _w;
h = _h;
initMem();
int nThreadsX = NTHREAD_X;
int nThreadsY = NTHREAD_Y;
int blocksW = w/nThreadsX + ((w % nThreadsX)?1:0);
int blocksH = h/nThreadsY + ((h % nThreadsY )?1:0);
dim3 blocks(blocksW, blocksH);
dim3 threads(nThreadsX, nThreadsY);
int blocks1D = (w*h)/256 + (w*h % 256?1:0); // for greyscale
int start = getTimeNow();
int s;
// Copy image to GPU
s = getTimeNow();
cudaMemcpy(gpu_img_prev_RGB, prev, w*h*3, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_img_cur_RGB, cur, w*h*3, cudaMemcpyHostToDevice);
checkCUDAError("start");
printf("Copying 2 images from CPU to GPU: %d ms\n", getTimeNow() - s);
// RGB -> grey
s = getTimeNow();
convertToGrey<<<blocks1D, 256>>>(gpu_img_prev_RGB, gpu_img_pyramid_prev[0], w*h);
convertToGrey<<<blocks1D, 256>>>(gpu_img_cur_RGB, gpu_img_pyramid_cur[0], w*h);
cudaThreadSynchronize();
checkCUDAError("convertToGrey");
printf("Converting from RGB to greyscale: %d ms\n", getTimeNow() - s);
s = getTimeNow();
for(int i=0; i < LEVELS-1; i++) {
smoothX<<<blocks, threads>>>(gpu_img_pyramid_prev[i], pyr_w[i], pyr_h[i], gpu_smoothed_prev_x);
smoothX<<<blocks, threads>>>(gpu_img_pyramid_cur[i], pyr_w[i], pyr_h[i], gpu_smoothed_cur_x);
cudaThreadSynchronize();
smoothY<<<blocks, threads>>>(gpu_smoothed_prev_x, pyr_w[i], pyr_h[i], gpu_smoothed_prev);
smoothY<<<blocks, threads>>>(gpu_smoothed_cur_x, pyr_w[i], pyr_h[i], gpu_smoothed_cur);
cudaThreadSynchronize();
pyrDownsample<<<blocks, threads>>>(gpu_smoothed_prev, pyr_w[i], pyr_h[i], gpu_img_pyramid_prev[i+1], pyr_w[i+1], pyr_h[i+1]);
pyrDownsample<<<blocks, threads>>>(gpu_smoothed_cur, pyr_w[i], pyr_h[i], gpu_img_pyramid_cur[i+1], pyr_w[i+1], pyr_h[i+1]);
cudaThreadSynchronize();
checkCUDAError("pyrDownsample here");
}
printf("Generating the pyramids: %d ms\n", getTimeNow() - s);
s = getTimeNow();
cudaMemset(gpu_status, 1, sizeof(char)*w*h);
// Do the actual tracking
for(int l=LEVELS-1; l >= 0; l--) {
cudaMemcpy2DToArray(gpu_array_pyramid_prev, 0, 0, gpu_img_pyramid_prev[l],
sizeof(float)*pyr_w[l], sizeof(float)*pyr_w[l], pyr_h[l], cudaMemcpyDeviceToDevice);
cudaMemcpy2DToArray(gpu_array_pyramid_cur, 0, 0, gpu_img_pyramid_cur[l],
sizeof(float)*pyr_w[l], sizeof(float)*pyr_w[l], pyr_h[l], cudaMemcpyDeviceToDevice);
track<<<blocks, threads>>>(w, h, pyr_w[l], pyr_h[l], scaling[l], l, (l == LEVELS-1), gpu_dx, gpu_dy, gpu_status);
cudaThreadSynchronize();
}
printf("Tracking: %d ms\n", getTimeNow() - s);
// Copy back results
s = getTimeNow();
cudaMemcpy(dx, gpu_dx, sizeof(float)*w*h, cudaMemcpyDeviceToHost);
cudaMemcpy(dy, gpu_dy, sizeof(float)*w*h, cudaMemcpyDeviceToHost);
cudaMemcpy(status, gpu_status, sizeof(char)*w*h, cudaMemcpyDeviceToHost);
printf("Copying results from GPU to CPU: %d ms\n", getTimeNow() - s);
printf("Total time for cudaLK: %d ms\n", getTimeNow() - start);
}
|
a13ef5a844d8cdd3846f5843826fba7f3a94653f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define m 10
#define n 5
__global__ void matrix_sum(int A[], int B[], int C[], int fil, int col)
{
int my_ij = blockDim.x * blockIdx.x + threadIdx.x;
if (blockIdx.x < fil && threadIdx.x < col)
C[my_ij] = A[my_ij] + B[my_ij];
}
void fill_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
A[i*n+j] = rand()%99;
}
}
void print_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
printf("%d ", A[i*n+j]);
printf("\n");
}
}
int main(int argc, char* argv[]) {
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
size_t size;
size = m*n*sizeof(int);
h_A = (int*) malloc(size);
h_B = (int*) malloc(size);
h_C = (int*) malloc(size);
fill_matrix(h_A, m, n);
fill_matrix(h_B, m, n);
print_matrix(h_A, m, n);
printf("\n");
print_matrix(h_B, m, n);
printf("\n");
hipMalloc((void **)&d_A, size);
hipMalloc((void **)&d_B, size);
hipMalloc((void **)&d_C, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrix_sum), dim3(m), dim3(n), 0, 0, d_A, d_B, d_C, m, n);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
print_matrix(h_C, m, n);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
| a13ef5a844d8cdd3846f5843826fba7f3a94653f.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define m 10
#define n 5
__global__ void matrix_sum(int A[], int B[], int C[], int fil, int col)
{
int my_ij = blockDim.x * blockIdx.x + threadIdx.x;
if (blockIdx.x < fil && threadIdx.x < col)
C[my_ij] = A[my_ij] + B[my_ij];
}
void fill_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
A[i*n+j] = rand()%99;
}
}
void print_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
printf("%d ", A[i*n+j]);
printf("\n");
}
}
int main(int argc, char* argv[]) {
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
size_t size;
size = m*n*sizeof(int);
h_A = (int*) malloc(size);
h_B = (int*) malloc(size);
h_C = (int*) malloc(size);
fill_matrix(h_A, m, n);
fill_matrix(h_B, m, n);
print_matrix(h_A, m, n);
printf("\n");
print_matrix(h_B, m, n);
printf("\n");
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
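// Launch one block per matrix row (m blocks) with one thread per column (n threads),
// so each thread adds exactly one element.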
matrix_sum<<<m, n>>>(d_A, d_B, d_C, m, n);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
print_matrix(h_C, m, n);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
90cd02adbb49a0a509ec89bbb1a97a4868d20cbe.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"
#include <cuml/decomposition/pca.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <cumlprims/opg/linalg/qr_based_svd.hpp>
#include <cumlprims/opg/matrix/matrix_utils.hpp>
#include <cumlprims/opg/stats/cov.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>
#include <raft/core/comms.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
const auto& comm = handle.get_comms();
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
rmm::device_uvector<T> cov_data(prms.n_cols * prms.n_cols, streams[0]);
auto cov_data_size = cov_data.size();
Matrix::Data<T> cov{cov_data.data(), cov_data_size};
Stats::opg::cov(handle, cov, input_data, input_desc, mu_data, true, streams, n_streams);
ML::truncCompExpVars<T, mg_solver>(
handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]);
T scalar = (prms.n_rows - 1);
raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true);
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
/**
* @brief performs MNMG fit operation for the pca
* @input param handle: the internal cuml handle object
* @input param input_data: input data partitions owned by this rank
* @input param input_desc: descriptor of how the input data is partitioned across ranks
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Reference issue https://github.com/rapidsai/cuml/issues/2470
auto n_streams = input_desc.blocksOwnedBy(rank).size();
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) {
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
} else if (prms.algorithm == mg_solver::QR) {
const raft::handle_t& h = handle;
hipStream_t stream = h.get_stream();
const auto& comm = h.get_comms();
// Center the data
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
// Allocate Q, S and V and call QR
std::vector<Matrix::Data<T>*> uMatrixParts;
Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream);
rmm::device_uvector<T> sVector(prms.n_cols, stream);
rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream);
RAFT_CUDA_TRY(hipMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T)));
LinAlg::opg::svdQR(h,
sVector.data(),
uMatrixParts,
vMatrix.data(),
true,
true,
prms.tol,
prms.n_iterations,
input_data,
input_desc,
rank);
// sign flip
sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams);
// Calculate instance variables
rmm::device_uvector<T> explained_var_all(prms.n_cols, stream);
rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream);
T scalar = 1.0 / (prms.n_rows - 1);
raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream);
raft::matrix::ratio(
handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream);
raft::matrix::truncZeroOrigin(explained_var_all.data(),
prms.n_cols,
explained_var,
prms.n_components,
std::size_t(1),
stream);
raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(),
prms.n_cols,
explained_var_ratio,
prms.n_components,
std::size_t(1),
stream);
raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);
Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream);
// Re-add mean to centered data
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input,
const Matrix::PartDescriptor input_desc,
T* components,
std::vector<Matrix::Data<T>*>& trans_input,
T* singular_vals,
T* mu,
const paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < input.size(); i++) {
auto si = i % n_streams;
raft::stats::meanCenter(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
components,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
T* components,
Matrix::Data<T>** trans_input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
// We want to update the API of this function, and other functions with
// regards to https://github.com/rapidsai/cuml/issues/2471
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& trans_input,
Matrix::PartDescriptor trans_input_desc,
T* components,
std::vector<Matrix::Data<T>*>& input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < local_blocks.size(); i++) {
auto si = i % n_streams;
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
components,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG inverse transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** trans_input,
T* components,
Matrix::Data<T>** input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
inverse_transform_impl(handle,
trans_data,
trans_desc,
components,
input_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
/**
* @brief performs MNMG fit and transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
Matrix::Data<T>** trans_input,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::floatData_t** input,
Matrix::floatData_t** trans_input,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::doubleData_t** input,
Matrix::doubleData_t** trans_input,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** input,
float* components,
Matrix::Data<float>** trans_input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** input,
double* components,
Matrix::Data<double>** trans_input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** trans_input,
float* components,
Matrix::Data<float>** input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** trans_input,
double* components,
Matrix::Data<double>** input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
} // namespace opg
} // namespace PCA
} // namespace ML
| 90cd02adbb49a0a509ec89bbb1a97a4868d20cbe.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"
#include <cuml/decomposition/pca.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <cumlprims/opg/linalg/qr_based_svd.hpp>
#include <cumlprims/opg/matrix/matrix_utils.hpp>
#include <cumlprims/opg/stats/cov.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>
#include <raft/core/comms.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
const auto& comm = handle.get_comms();
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
rmm::device_uvector<T> cov_data(prms.n_cols * prms.n_cols, streams[0]);
auto cov_data_size = cov_data.size();
Matrix::Data<T> cov{cov_data.data(), cov_data_size};
Stats::opg::cov(handle, cov, input_data, input_desc, mu_data, true, streams, n_streams);
ML::truncCompExpVars<T, mg_solver>(
handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]);
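// Recover the singular values of the mean-centred data from the covariance eigenvalues:
// s_i = sqrt(explained_var_i * (n_rows - 1)).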
T scalar = (prms.n_rows - 1);
raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true);
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
/**
* @brief performs MNMG fit operation for the pca
* @input param handle: the internal cuml handle object
* @input param input_data: input data partitions owned by this rank
* @input param input_desc: descriptor of how the input data is partitioned across ranks
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Reference issue https://github.com/rapidsai/cuml/issues/2470
auto n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) {
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
} else if (prms.algorithm == mg_solver::QR) {
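// QR path: rather than forming the covariance matrix, centre the data and compute a
// distributed QR-based SVD of the centred data; components and variances are then derived
// directly from the right singular vectors and singular values.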
const raft::handle_t& h = handle;
cudaStream_t stream = h.get_stream();
const auto& comm = h.get_comms();
// Center the data
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
// Allocate Q, S and V and call QR
std::vector<Matrix::Data<T>*> uMatrixParts;
Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream);
rmm::device_uvector<T> sVector(prms.n_cols, stream);
rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream);
RAFT_CUDA_TRY(cudaMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T)));
LinAlg::opg::svdQR(h,
sVector.data(),
uMatrixParts,
vMatrix.data(),
true,
true,
prms.tol,
prms.n_iterations,
input_data,
input_desc,
rank);
// Flip the signs of the singular vectors to a fixed convention so that results are
// deterministic across runs and ranks.
sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams);
// Calculate instance variables
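// Per-component explained variance comes from the squared singular values scaled by
// 1/(n_rows - 1); the ratio normalises by the total variance, and all results are then
// truncated to the first n_components.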
rmm::device_uvector<T> explained_var_all(prms.n_cols, stream);
rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream);
T scalar = 1.0 / (prms.n_rows - 1);
raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream);
raft::matrix::ratio(
handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream);
raft::matrix::truncZeroOrigin(explained_var_all.data(),
prms.n_cols,
explained_var,
prms.n_components,
std::size_t(1),
stream);
raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(),
prms.n_cols,
explained_var_ratio,
prms.n_components,
std::size_t(1),
stream);
raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);
Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream);
// Re-add mean to centered data
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input,
const Matrix::PartDescriptor input_desc,
T* components,
std::vector<Matrix::Data<T>*>& trans_input,
T* singular_vals,
T* mu,
const paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
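// If whitening is requested the components are temporarily rescaled by sqrt(n_rows - 1) / s_i
// (and restored after the loop) so the projected scores come out with unit variance. Each local
// data block is then mean-centred in place, projected onto the components with a single GEMM,
// and un-centred again so the caller's input is left unchanged.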
if (prms.whiten) {
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < input.size(); i++) {
auto si = i % n_streams;
raft::stats::meanCenter(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
components,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
CUBLAS_OP_N,
CUBLAS_OP_T,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
T* components,
Matrix::Data<T>** trans_input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
// We want to update the API of this function, and other functions with
// regards to https://github.com/rapidsai/cuml/issues/2471
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& trans_input,
Matrix::PartDescriptor trans_input_desc,
T* components,
std::vector<Matrix::Data<T>*>& input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < local_blocks.size(); i++) {
auto si = i % n_streams;
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
components,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG inverse transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** trans_input,
T* components,
Matrix::Data<T>** input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
inverse_transform_impl(handle,
trans_data,
trans_desc,
components,
input_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
/**
* @brief performs MNMG fit and transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
Matrix::Data<T>** trans_input,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::floatData_t** input,
Matrix::floatData_t** trans_input,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::doubleData_t** input,
Matrix::doubleData_t** trans_input,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** input,
float* components,
Matrix::Data<float>** trans_input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** input,
double* components,
Matrix::Data<double>** trans_input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** trans_input,
float* components,
Matrix::Data<float>** input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** trans_input,
double* components,
Matrix::Data<double>** input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
73ce92d97708c6b2fc052cd0fbf52c4c5a5cd17e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1) / b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__device__ __host__ int LowBit(int a) { return a&(-a); }
__global__ void Count_transform(const char *input, int *output, int text_size);
__global__ void Count_Sum(int *input, int *output, int text_size);
static void Launch_Count_transform_kernel(const char *input, int *output, int text_size, size_t grid_dim, size_t block_dim){
dim3 grid = dim3(grid_dim);
dim3 block= dim3(block_dim);
hipLaunchKernelGGL(( Count_transform), dim3(grid), dim3(block), 0, 0, input, output, text_size);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
static void Launch_Count_sum_kernel(int *input, int *output, int text_size, size_t grid_dim, size_t block_dim){
dim3 grid = dim3(grid_dim);
dim3 block= dim3(block_dim);
hipLaunchKernelGGL(( Count_Sum), dim3(grid), dim3(block), 0, 0, input, output, text_size);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
void CountPosition1(const char *text, int *pos, int text_size)
{
char *spaces;
hipMalloc((void **)&spaces, sizeof(char) * text_size);
hipMemset(spaces, 10, text_size);
thrust::device_ptr<const char> device_text = thrust::device_pointer_cast(text);
thrust::device_ptr<int> device_pos = thrust::device_pointer_cast(pos);
thrust::device_ptr<const char> device_spaces = thrust::device_pointer_cast(spaces);
thrust::transform(device_text, device_text + text_size, device_spaces, device_pos, thrust::not_equal_to<const char>());
thrust::inclusive_scan_by_key(device_pos, device_pos + text_size, device_pos, device_pos);
hipFree(spaces);
}
void CountPosition2(const char *text, int *pos, int text_size)
{
int block_dim = 128;
int *tmp_pos;
hipMalloc((void **)&tmp_pos, sizeof(int) * text_size);
hipMemset(tmp_pos, 0, sizeof(int) * text_size);
hipMemset(pos, 0, sizeof(int) * text_size); // Count_Sum never writes separator positions, so zero the output first
Launch_Count_transform_kernel(text, tmp_pos, text_size, CeilDiv(text_size, block_dim), block_dim);
Launch_Count_sum_kernel(tmp_pos, pos, text_size, CeilDiv(text_size, block_dim), block_dim);
hipFree(tmp_pos);
}
__global__ void Count_transform(const char *input, int *output, int text_size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int result = 1;
if (tid < text_size)
{
if (input[tid] == 10)
{
result = 0;
}
output[tid] = result;
}
}
__global__ void Count_Sum(int *input, int *output, int text_size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < text_size)
{
int index = tid;
if (index == 0)
{
int sum = 0;
while (input[index] != 0)
{
sum++;
output[index] = sum;
index++;
}
}
else
{
if (input[index - 1] == 0 && input[index] == 1)
{
int sum = 0;
while (input[index] != 0)
{
sum++;
output[index] = sum;
index++;
}
}
}
}
} | 73ce92d97708c6b2fc052cd0fbf52c4c5a5cd17e.cu | #include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1) / b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__device__ __host__ int LowBit(int a) { return a&(-a); }
__global__ void Count_transform(const char *input, int *output, int text_size);
__global__ void Count_Sum(int *input, int *output, int text_size);
static void Launch_Count_transform_kernel(const char *input, int *output, int text_size, size_t grid_dim, size_t block_dim){
dim3 grid = dim3(grid_dim);
dim3 block= dim3(block_dim);
Count_transform<<<grid, block>>>(input, output, text_size);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
static void Launch_Count_sum_kernel(int *input, int *output, int text_size, size_t grid_dim, size_t block_dim){
dim3 grid = dim3(grid_dim);
dim3 block= dim3(block_dim);
Count_Sum<<<grid, block>>>(input, output, text_size);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
void CountPosition1(const char *text, int *pos, int text_size)
{
char *spaces;
cudaMalloc((void **)&spaces, sizeof(char) * text_size);
cudaMemset(spaces, 10, text_size);
thrust::device_ptr<const char> device_text = thrust::device_pointer_cast(text);
thrust::device_ptr<int> device_pos = thrust::device_pointer_cast(pos);
thrust::device_ptr<const char> device_spaces = thrust::device_pointer_cast(spaces);
thrust::transform(device_text, device_text + text_size, device_spaces, device_pos, thrust::not_equal_to<const char>());
thrust::inclusive_scan_by_key(device_pos, device_pos + text_size, device_pos, device_pos);
cudaFree(spaces);
}
void CountPosition2(const char *text, int *pos, int text_size)
{
int block_dim = 128;
int *tmp_pos;
cudaMalloc((void **)&tmp_pos, sizeof(int) * text_size);
cudaMemset(tmp_pos, 0, sizeof(int) * text_size);
Launch_Count_transform_kernel(text, tmp_pos, text_size, CeilDiv(text_size, block_dim), block_dim);
Launch_Count_sum_kernel(tmp_pos, pos, text_size, CeilDiv(text_size, block_dim), block_dim);
    cudaFree(tmp_pos);
}
__global__ void Count_transform(const char *input, int *output, int text_size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int result = 1;
if (tid < text_size)
{
if (input[tid] == 10)
{
result = 0;
}
output[tid] = result;
}
}
__global__ void Count_Sum(int *input, int *output, int text_size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < text_size)
{
int index = tid;
if (index == 0)
{
int sum = 0;
while (index < text_size && input[index] != 0)
{
sum++;
output[index] = sum;
index++;
}
}
else
{
if (input[index - 1] == 0 && input[index] == 1)
{
int sum = 0;
while (index < text_size && input[index] != 0)
{
sum++;
output[index] = sum;
index++;
}
}
}
}
} |
e907efe382d584ac487de53f4bde257eaa1fcf7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
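// Three generated variants of the Composite{((i0 * i1) + i2)} element-wise kernel
// follow: a 1-D strided version, a 2-D strided version, and a fast path for fully
// C-contiguous buffers. The launcher further down picks one after collapsing dims.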
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i1_data = i1_data;
const float * ii_i2_data = i2_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i1_data += pos0 * i1_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * ii_i1_data[0];
o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0];
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i1_data = i1_data;
const float * ii_i2_data = i2_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i1_data += pos1 * i1_str_1;
ii_i2_data += pos1 * i2_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i1_data += pos0 * i1_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * ii_i1_data[0];
o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0];
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * i1_data[i];
o0_i = V_DUMMY_ID__tmp1 + i2_data[i];
}
o0_data[i] = o0_i;
}
}
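// Stride-collapse helper: dimension i can be merged with dimension i-1 when
// strides[i] * dims[i] == strides[i-1], i.e. the two dimensions are contiguous
// in memory; the launcher uses this to shrink the kernel's indexing work.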
static void can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//dimension i is contiguous with dimension i-1, so the two can be merged
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_adb95000fb2d8b41c5f4822585a03d95_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[3][2];
int local_ostr[1][2];
int nd_collapse = 2;
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<3;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<3;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_1[2] = {1,1};
can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_2[2] = {1,1};
can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(nd_collapse, local_dims, local_str[2], nd_collapse_2);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_2[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
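// The struct below is the compiled Theano thunk. init() stores the Python storage
// cells, run() extracts and validates the CudaNdarray inputs (rank, broadcastable
// dims), reuses input 1 as the in-place output, and calls the launcher above.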
namespace {
struct __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95 {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V1;
__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_10:
double __DUMMY_10;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
{
py_V1 = Py_None;
{Py_XINCREF(py_V1);}
V1 = NULL;
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 1 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {1, 1};
int broadcasts_V5[2] = {0, 0};
int broadcasts_V7[2] = {0, 0};
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
Py_XDECREF(V1);
V1 = V5;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 1, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_adb95000fb2d8b41c5f4822585a03d95_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} END\n";
__label_9:
double __DUMMY_9;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_executor(__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95* self) {
return self->run();
}
static void __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (5 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95* struct_ptr = new __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_executor), struct_ptr, __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC initadb95000fb2d8b41c5f4822585a03d95(void){
(void) Py_InitModule("adb95000fb2d8b41c5f4822585a03d95", MyMethods);
}
| e907efe382d584ac487de53f4bde257eaa1fcf7b.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i1_data = i1_data;
const float * ii_i2_data = i2_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i1_data += pos0 * i1_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * ii_i1_data[0];
o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0];
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i1_data = i1_data;
const float * ii_i2_data = i2_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i1_data += pos1 * i1_str_1;
ii_i2_data += pos1 * i2_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i1_data += pos0 * i1_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * ii_i1_data[0];
o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0];
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * i1) + i2)}}[(0, 1)]
// node.op.destroy_map={0: [1]}
// Input 0 CudaNdarrayType(float32, (True, True))
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, matrix)
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i0_value = i0_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i0_value * i1_data[i];
o0_i = V_DUMMY_ID__tmp1 + i2_data[i];
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//dimension i is contiguous with dimension i-1, so the two can be merged
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_adb95000fb2d8b41c5f4822585a03d95_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[3][2];
int local_ostr[1][2];
int nd_collapse = 2;
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<3;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<3;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_1[2] = {1,1};
can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_2[2] = {1,1};
can_collapse_node_adb95000fb2d8b41c5f4822585a03d95_0(nd_collapse, local_dims, local_str[2], nd_collapse_2);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_2[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_adb95000fb2d8b41c5f4822585a03d95_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_adb95000fb2d8b41c5f4822585a03d95_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95 {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V1;
__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_10:
double __DUMMY_10;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
{
py_V1 = Py_None;
{Py_XINCREF(py_V1);}
V1 = NULL;
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V3)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[0], 0);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V3)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n";
//std::cerr << "c_extract " << V3->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V3)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V3)[1], 1);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << "bcast check 1 passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {1, 1};
int broadcasts_V5[2] = {0, 0};
int broadcasts_V7[2] = {0, 0};
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
Py_XDECREF(V1);
V1 = V5;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 1, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_adb95000fb2d8b41c5f4822585a03d95_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 9;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_9;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{((i0 * i1) + i2)} END\n";
__label_9:
double __DUMMY_9;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_executor(__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95* self) {
return self->run();
}
static void __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95*)self);
}
//////////////////////
//// Functions
//////////////////////
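// instantiate() allocates the compiled-op struct, initializes it with the five storage lists passed
// from Python, and returns an executor thunk; the paired destructor frees the struct (generated glue code).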
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (5 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95* struct_ptr = new __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_executor), struct_ptr, __struct_compiled_op_adb95000fb2d8b41c5f4822585a03d95_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC initadb95000fb2d8b41c5f4822585a03d95(void){
(void) Py_InitModule("adb95000fb2d8b41c5f4822585a03d95", MyMethods);
}
|
17a4cd3870f90f0cfa42cd7b6e8c2ba7a975c003.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h"
#include "kernel_hip.cuh"
#include "Input.h"
#pragma region Structs
struct Ray
{
vec3 origin;
vec3 direction;
__host__ __device__ Ray(vec3 origin, vec3 direction)
{
this->origin = origin + direction * (ENABLE_SURFACE_ACNE ? 0 : EPSILON);
this->direction = direction;
}
};
struct Photon
{
__host__ __device__ Photon()
{
position = vec3(0, 0, 0);
normal = vec3(0, 0, 0);
power = vec3(0, 0, 0);
type = NONE;
isHit = false;
}
vec3 position;
vec3 normal;
vec3 power;
MaterialType type;
bool isHit;
};
struct Camera
{
__host__ __device__ Camera()
{
position = glm::vec3(0.0f, 46.0f, 126.0f);
fov = 70.0f;
nearPlane = 0.1f;
farPlane = 1000.0f;
moveSpeed = 25.0f;
mouseSpeed = 10.0f;
pitch = 0.0f;
yaw = 180.0f;
view = mat4(0);
proj = mat4(0);
aperture = 0;
focalDistance = 0.1f;
}
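	// Builds a primary ray for pixel (x, y): jitters within the pixel for anti-aliasing, constructs the
	// camera basis from forward/up, and, when dof is enabled, samples a point on the lens aperture so
	// that geometry at focalDistance stays sharp.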
__device__ Ray GetRay(hiprandState_t* randState, int x, int y, bool dof)
{
float jitterValueX = hiprand_uniform(randState) - 0.5;
float jitterValueY = hiprand_uniform(randState) - 0.5;
vec3 wDir = glm::normalize(-forward);
vec3 uDir = glm::normalize(cross(up, wDir));
vec3 vDir = glm::cross(wDir, -uDir);
float top = __tanf(fov * glm::pi<float>() / 360.0f);
float right = aspectRatio * top;
float bottom = -top;
float left = -right;
float imPlaneUPos = left + (right - left)*(((float)x + jitterValueX) / (float)width);
float imPlaneVPos = bottom + (top - bottom)*(((float)y + jitterValueY) / (float)height);
vec3 originDirection = imPlaneUPos * uDir + imPlaneVPos * vDir - wDir;
vec3 pointOnImagePlane = position + ((originDirection) * focalDistance);
if (dof)
{
vec3 aperturePoint = vec3(0, 0, 0);
if (aperture >= EPSILON)
{
float r1 = hiprand_uniform(randState);
float r2 = hiprand_uniform(randState);
float angle = two_pi<float>() * r1;
float distance = aperture * sqrt(r2);
float apertureX = __cosf(angle) * distance;
float apertureY = __sinf(angle) * distance;
				aperturePoint = position + (uDir * apertureX) + (vDir * apertureY); // offset within the lens plane
}
else
{
aperturePoint = position;
}
return Ray(aperturePoint, normalize(pointOnImagePlane - aperturePoint));
}
else
{
return Ray(position, normalize(originDirection));
}
}
void UpdateScreen(int width, int height)
{
this->width = width;
this->height = height;
this->aspectRatio = width / (float)height;
glViewport(0, 0, width, height);
proj = perspective(radians(fov), aspectRatio, nearPlane, farPlane);
}
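	// Per-frame camera update: WASD movement, right-mouse-drag look, and recomputation of the view
	// matrix; any change to the view marks the progressive CUDA accumulation dirty.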
void UpdateCamera(float deltaTime)
{
vec2 input = vec2(IsKeyDown('w') ? 1 : IsKeyDown('s') ? -1 : 0, IsKeyDown('d') ? 1 : IsKeyDown('a') ? -1 : 0);
if (IsMouseDown(1))
HandleRotate(deltaTime);
HandleMove(input, deltaTime);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(fov, aspectRatio, nearPlane, farPlane);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
forward.x = cos(radians(pitch)) * sin(radians(yaw));
forward.y = sin(radians(pitch));
forward.z = cos(radians(pitch)) * cos(radians(yaw));
forward = normalize(forward);
right = normalize(cross(forward, vec3(0, 1, 0)));
up = normalize(cross(right, forward));
mat4 viewMatrix = lookAt(position, position + forward, up);
if (view != viewMatrix)
{
cudaDirty = true;
view = viewMatrix;
}
glMultMatrixf(value_ptr(view));
toggleMouseMovement = IsMouseDown(1);
}
bool toggleMouseMovement;
float width, height;
float moveSpeed, mouseSpeed;
float nearPlane, farPlane;
float fov;
float aspectRatio;
float pitch, yaw;
	// thin-lens depth of field parameters
	float aperture, focalDistance;
vec3 position;
vec3 forward, up, right;
mat4 view;
mat4 proj;
private:
void HandleRotate(float deltaTime)
{
if (toggleMouseMovement == false)
{
WarpMouse(width / 2, height / 2);
return;
}
int xPos, yPos;
GetMousePos(xPos, yPos);
pitch += mouseSpeed * float(height / 2 - yPos) * deltaTime;
yaw += mouseSpeed * float(width / 2 - xPos) * deltaTime;
pitch = clamp(pitch, -89.0f, 89.0f);
yaw = mod(yaw, 360.0f);
WarpMouse(width / 2, height / 2);
}
void HandleMove(vec2 input, float deltaTime)
{
position += (forward * input.x + right * input.y) * deltaTime * moveSpeed;
}
};
struct Material
{
__host__ __device__ Material(MaterialType type = DIFF, vec3 color = vec3(0), vec3 emission = vec3(0))
{
this->type = type;
this->color = color;
this->emission = emission;
}
MaterialType type;
vec3 color;
vec3 emission;
};
struct ObjectIntersection
{
__host__ __device__ ObjectIntersection(bool hit = false, float t = 0, vec3 normal = vec3(0), int materialID = -1)
{
this->hit = hit;
this->t = t;
this->normal = normal;
this->materialID = materialID;
}
bool hit;
float t;
vec3 normal;
int materialID;
};
struct Triangle
{
__host__ __device__ Triangle(vec3 p0 = vec3(0), vec3 p1 = vec3(0), vec3 p2 = vec3(0), vec3 n0 = vec3(0), vec3 n1 = vec3(0), vec3 n2 = vec3(0), int materialID = 0)
{
pos[0] = p0; pos[1] = p1; pos[2] = p2;
nor[0] = normalize(n0); nor[1] = normalize(n1); nor[2] = normalize(n2);
this->materialID = materialID;
}
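	// Moller-Trumbore ray/triangle test; the normal is barycentrically interpolated when
	// ENABLE_SMOOTH_NORMAL is set, otherwise the geometric face normal is used.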
__device__ ObjectIntersection Intersect(const Ray &ray) const
{
bool hit = false;
float u, v, t = 0;
vec3 normal = vec3(0);
vec3 v0v1 = pos[1] - pos[0];
vec3 v0v2 = pos[2] - pos[0];
vec3 pvec = cross(ray.direction, v0v2);
float det = dot(v0v1, pvec);
if (fabs(det) < EPSILON) return ObjectIntersection(hit, t, normal, materialID);
float invDet = 1.0f / det;
vec3 tvec = ray.origin - pos[0];
u = dot(tvec, pvec) * invDet;
if (u < 0 || u > 1) return ObjectIntersection(hit, t, normal, materialID);
vec3 qvec = cross(tvec, v0v1);
v = dot(ray.direction, qvec) * invDet;
if (v < 0 || u + v > 1) return ObjectIntersection(hit, t, normal, materialID);
t = dot(v0v2, qvec) * invDet;
if (t < EPSILON) return ObjectIntersection(hit, t, normal, materialID);
if (ENABLE_SMOOTH_NORMAL)
normal = normalize((1 - u - v) * nor[0] + u * nor[1] + v * nor[2]);
else
normal = normalize(cross(v0v1, v0v2));
hit = true;
return ObjectIntersection(hit, t, normal, materialID);
}
vec3 pos[3];
vec3 nor[3];
int materialID;
};
struct Sphere
{
__host__ __device__ Sphere(vec3 position = vec3(0), float radius = 0, Material material = Material())
{
this->position = position;
this->radius = radius;
this->materialID = materials.size();
materials.push_back(material);
}
float radius;
vec3 position;
int materialID;
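	// Analytic ray/sphere intersection: solves the quadratic and keeps the nearest root greater than EPSILON.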
__device__ ObjectIntersection Intersect(const Ray &ray)
{
bool hit = false;
float distance = 0, t = 0;
vec3 normal = vec3(0, 0, 0);
vec3 op = position - ray.origin;
float b = dot(op, ray.direction);
float det = b * b - dot(op, op) + radius * radius;
if (det < EPSILON)
return ObjectIntersection(hit, t, normal, materialID);
else
det = glm::sqrt(det);
distance = (t = b - det) > EPSILON ? t : ((t = b + det) > EPSILON ? t : 0);
if (distance > EPSILON)
{
hit = true;
normal = normalize(ray.direction * distance - op);
}
return ObjectIntersection(hit, distance, normal, materialID);
}
__device__ vec3 RandomPoint(hiprandState_t* randState)
{
float theta = hiprand_uniform(randState) * pi<float>();
float phi = hiprand_uniform(randState) * two_pi<float>();
// Convert to Cartesian and scale by radius
float dxr = radius * sin(theta) * cos(phi);
float dyr = radius * sin(theta) * sin(phi);
float dzr = radius * cos(theta);
return vec3(position.x + dxr, position.y + dyr, position.z + dzr);
}
};
#pragma region KDTree
struct AABB
{
__device__ __host__ AABB()
{
bounds[0] = vec3(0);
bounds[1] = vec3(1);
}
__device__ __host__ AABB(vec3 min, vec3 max)
{
bounds[0] = min;
bounds[1] = max;
}
__device__ __host__ AABB(Triangle* triangles, int count)
{
for (int i = 0; i < count; i++)
{
Expand(triangles[i]);
}
}
__device__ __host__ void Expand(Triangle triangle)
{
Expand
(
vec3
(
min(min(triangle.pos[0].x, triangle.pos[1].x), triangle.pos[2].x),
min(min(triangle.pos[0].y, triangle.pos[1].y), triangle.pos[2].y),
min(min(triangle.pos[0].z, triangle.pos[1].z), triangle.pos[2].z)
),
vec3
(
max(max(triangle.pos[0].x, triangle.pos[1].x), triangle.pos[2].x),
max(max(triangle.pos[0].y, triangle.pos[1].y), triangle.pos[2].y),
max(max(triangle.pos[0].z, triangle.pos[1].z), triangle.pos[2].z)
)
);
}
__device__ __host__ void Expand(vec3 min, vec3 max)
{
if (min.x < bounds[0].x) bounds[0].x = min.x;
if (min.y < bounds[0].y) bounds[0].y = min.y;
if (min.z < bounds[0].z) bounds[0].z = min.z;
if (max.x > bounds[1].x) bounds[1].x = max.x;
if (max.y > bounds[1].y) bounds[1].y = max.y;
if (max.z > bounds[1].z) bounds[1].z = max.z;
}
vec3 bounds[2];
};
struct KDTreeNode
{
__device__ __host__ KDTreeNode(int l = -1, int r = -1, int sa = -1, int ti = 0, int tn = 0, float sp = 0, int d = 0)
{
leftChild = l; rightChild = r; splitAxis = sa; triangleIndex = ti; triangleNumber = tn; splitPos = sp; depth = d;
}
__device__ __host__ KDTreeNode(const KDTreeNode& g)
{
leftChild = g.leftChild; rightChild = g.rightChild; splitAxis = g.splitAxis; triangleIndex = g.triangleIndex;
triangleNumber = g.triangleNumber; splitPos = g.splitPos; nodeAABB = g.nodeAABB; depth = g.depth;
}
int leftChild;
int rightChild;
int splitAxis;
int triangleIndex;
int triangleNumber;
float splitPos;
int depth;
AABB nodeAABB;
};
__device__ void AABBMax(vec3* x, vec3* y, vec3* z, vec3* dist)
{
float xmax = x->x > y->x ? x->x : y->x;
xmax = xmax > z->x ? xmax : z->x;
float ymax = x->y > y->y ? x->y : y->y;
ymax = ymax > z->y ? ymax : z->y;
float zmax = x->z > y->z ? x->z : y->z;
zmax = zmax > z->z ? zmax : z->z;
dist->x = xmax;
dist->y = ymax;
dist->z = zmax;
}
__device__ void AABBMin(vec3* x, vec3* y, vec3* z, vec3* dist)
{
	float xmin = x->x < y->x ? x->x : y->x;
	xmin = xmin < z->x ? xmin : z->x;
	float ymin = x->y < y->y ? x->y : y->y;
	ymin = ymin < z->y ? ymin : z->y;
	float zmin = x->z < y->z ? x->z : y->z;
	zmin = zmin < z->z ? zmin : z->z;
	dist->x = xmin;
	dist->y = ymin;
	dist->z = zmin;
}
__global__ void CreateAABB(int n, Triangle* tri, AABB* aabb)
{
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
return;
AABBMax(&(tri[tid].pos[0]), &(tri[tid].pos[1]), &(tri[tid].pos[2]), &(aabb[tid].bounds[1]));
AABBMin(&(tri[tid].pos[0]), &(tri[tid].pos[1]), &(tri[tid].pos[2]), &(aabb[tid].bounds[0]));
}
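// Single-thread kernel that resets all device work lists and pushes the root node, which references
// every triangle and carries the precomputed scene AABB.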
__global__ void InitRoot(int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* activeList, unsigned int* activeListPtr, unsigned int* nextListPtr, unsigned int* smallListPtr, unsigned int* tnaPtr, AABB aabb)
{
DeviceVector<int>::clear(activeListPtr);
DeviceVector<int>::clear(nextListPtr);
DeviceVector<int>::clear(smallListPtr);
DeviceVector<int>::clear(tnaPtr);
DeviceVector<KDTreeNode>::clear(nodesPtr);
KDTreeNode n;
n.triangleIndex = 0;
n.triangleNumber = nTri;
n.nodeAABB = aabb;
n.depth = 0;
DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, n);
*(tnaPtr) = nTri;
int i = 0;
DeviceVector<int>::push_back(activeList, activeListPtr, i);
}
__global__ void CopyTriangle(int* tna, int n)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n)
return;
tna[tid] = tid;
}
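// Spatial-median split for large nodes: splits along the longest axis of the node AABB, duplicates
// references for triangles that straddle the plane, and routes each child either back to nextList
// (still large) or to smallList (small enough for the SAH pass).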
__global__ void MidSplitNode(Triangle* tri, AABB* aabb, int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* activeList, unsigned int* activeListPtr, int* nextList, unsigned int* nextListPtr, int* smallList, unsigned int* smallListPtr, int* tna, unsigned int* tnaPtr, int* tnahelper, unsigned int* tnahelperPtr, unsigned int tnaStartPtr)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= *activeListPtr)
return;
//printf("tid=%d\n",tid);
int id = activeList[tid];
//printf("node triangle number=%d\n",nodes[id].triangleNumber);
int leftid;
int rightid;
float sp;
if (nodes[id].depth > KDTREE_MAX_DEPTH)
return;
//KDTreeNode currentNode(nodes[id]);
vec3 volume = nodes[id].nodeAABB.bounds[1] - nodes[id].nodeAABB.bounds[0];
if (volume.x >= volume.y && volume.x >= volume.z)// split x
{
nodes[id].splitAxis = 0;
sp = nodes[id].nodeAABB.bounds[0].x + volume.x / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].x = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].x = nodes[id].nodeAABB.bounds[1].x;
atarashiiNode.nodeAABB.bounds[0].x = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else if (volume.y >= volume.x && volume.y >= volume.z)// split y
{
nodes[id].splitAxis = 1;
sp = nodes[id].nodeAABB.bounds[0].y + volume.y / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].y = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].y = nodes[id].nodeAABB.bounds[1].y;
atarashiiNode.nodeAABB.bounds[0].y = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else // split z
{
nodes[id].splitAxis = 2;
sp = nodes[id].nodeAABB.bounds[0].z + volume.z / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].z = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].z = nodes[id].nodeAABB.bounds[1].z;
atarashiiNode.nodeAABB.bounds[0].z = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
// split triangles
int leftcount = 0;
int rightcount = 0;
unsigned int tnapos;
int endPtr = nodes[id].triangleIndex + nodes[id].triangleNumber - 1;
/*printf("triangleIndex=%d\n", currentNode.triangleIndex);
printf("triangleNumber=%d\n", currentNode.triangleNumber);
printf("endPtr=%d\n", endPtr);*/
for (int i = nodes[id].triangleIndex; i <= endPtr; i++)
{
int triid = tna[i];
switch (nodes[id].splitAxis)
{
case 0:
if (aabb[triid].bounds[0].x <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].x >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 1:
if (aabb[triid].bounds[0].y <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].y >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 2:
if (aabb[triid].bounds[0].z <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].z >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
}
}
//printf("leftcount=%d\nrightcount=%d\n", leftcount, rightcount);
nodes[leftid].triangleNumber = leftcount;
nodes[rightid].triangleNumber = rightcount;
nodes[leftid].depth = nodes[id].depth + 1;
nodes[rightid].depth = nodes[id].depth + 1;
//printf("node %d was splited with left = %d and right = %d with sp=%.5f tna=%d\n", id, leftcount, rightcount, sp, *tnaPtr);
// add to nextList
if (leftcount > KDTREE_THRESHOLD * 2)
DeviceVector<int>::push_back(nextList, nextListPtr, leftid);
else if (leftcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, leftid);
if (rightcount > KDTREE_THRESHOLD * 2)
DeviceVector<int>::push_back(nextList, nextListPtr, rightid);
else if (rightcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, rightid);
}
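// Surface-area-heuristic split for small nodes: sweeps candidate planes at 10% increments along the
// longest axis and splits at the cheapest candidate.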
__global__ void SAHSplitNode(Triangle* tri, AABB* aabb, int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* smallList, unsigned int* smallListPtr, int* nextList, unsigned int* nextListPtr, int* tna, unsigned int* tnaPtr, int* tnahelper, unsigned int* tnahelperPtr, unsigned int tnaStartPtr)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= *smallListPtr)
return;
//printf("tid=%d\n",tid);
int id = smallList[tid];
//printf("node triangle number=%d\n",nodes[id].triangleNumber);
int leftid;
int rightid;
float tpos;
//KDTreeNode currentNode(nodes[id]);
if (nodes[id].depth > KDTREE_MAX_DEPTH)
return;
vec3 volume = nodes[id].nodeAABB.bounds[1] - nodes[id].nodeAABB.bounds[0];
if (volume.x >= volume.y && volume.x >= volume.z)// split x
{
nodes[id].splitAxis = 0;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].x + volume.x*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
			if ((aabb[tna[i]].bounds[0].x + aabb[tna[i]].bounds[1].x) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
		tpos = minpos; // use the best SAH candidate found above
		nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].x = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].x = nodes[id].nodeAABB.bounds[1].x;
atarashiiNode.nodeAABB.bounds[0].x = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else if (volume.y >= volume.x && volume.y >= volume.z)// split y
{
nodes[id].splitAxis = 1;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].y + volume.y*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
			if ((aabb[tna[i]].bounds[0].y + aabb[tna[i]].bounds[1].y) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
		tpos = minpos; // use the best SAH candidate found above
		nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].y = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].y = nodes[id].nodeAABB.bounds[1].y;
atarashiiNode.nodeAABB.bounds[0].y = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else // split z
{
nodes[id].splitAxis = 2;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].z + volume.z*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
			if ((aabb[tna[i]].bounds[0].z + aabb[tna[i]].bounds[1].z) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
		tpos = minpos; // use the best SAH candidate found above
		nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].z = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].z = nodes[id].nodeAABB.bounds[1].z;
atarashiiNode.nodeAABB.bounds[0].z = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
//printf("sp=%.3f\n",sp);
// split triangles
int leftcount = 0;
int rightcount = 0;
unsigned int tnapos;
int endPtr = nodes[id].triangleIndex + nodes[id].triangleNumber - 1;
/*printf("triangleIndex=%d\n", currentNode.triangleIndex);
printf("triangleNumber=%d\n", currentNode.triangleNumber);
printf("endPtr=%d\n", endPtr);*/
for (int i = nodes[id].triangleIndex; i <= endPtr; i++)
{
int triid = tna[i];
switch (nodes[id].splitAxis)
{
case 0:
if (aabb[triid].bounds[0].x <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
//DeviceVector<int>::push_back(tnahelper, tnahelperPtr, leftid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].x >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 1:
if (aabb[triid].bounds[0].y <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].y >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 2:
if (aabb[triid].bounds[0].z <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].z >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
}
}
//printf("leftcount=%d\nrightcount=%d\n", leftcount, rightcount);
nodes[leftid].triangleNumber = leftcount;
nodes[rightid].triangleNumber = rightcount;
//printf("node %d was splited with left = %d and right = %d with tna=%d\n", id, leftcount, rightcount, *tnaPtr);
// add to nextList
nodes[leftid].depth = nodes[id].depth+1;
nodes[rightid].depth = nodes[id].depth+1;
if (leftcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, leftid);
if (rightcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, rightid);
}
__global__ void CalculateTriangleIndex(int start, int end, int base, KDTreeNode* nodes)
{
int count = 0;
int basecount = nodes[base].triangleIndex + nodes[base].triangleNumber;
for (int i = start; i <= end; i++)
{
nodes[i].triangleIndex = basecount + count;
count += nodes[i].triangleNumber;
}
}
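// Ray/AABB slab test against a KD-tree node (offset by the mesh position): the ray is mirrored so all
// direction components are positive, and the entry distance is compared with the closest hit so far.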
__device__ bool KDRayTraversal(KDTreeNode* root, Ray ray, float& minDist, float& distance, vec3 position)
{
	if (root->triangleNumber <= 0)
		return false;
vec3 minBox = root->nodeAABB.bounds[0] + position;
vec3 maxBox = root->nodeAABB.bounds[1] + position;
if (ray.direction.x < 0)
{
ray.origin.x = minBox.x + maxBox.x - ray.origin.x;
ray.direction.x = -ray.direction.x;
}
if (ray.direction.y < 0)
{
ray.origin.y = minBox.y + maxBox.y - ray.origin.y;
ray.direction.y = -ray.direction.y;
}
if (ray.direction.z < 0)
{
ray.origin.z = minBox.z + maxBox.z - ray.origin.z;
ray.direction.z = -ray.direction.z;
}
vec3 div = 1.0f / ray.direction;
vec3 tMin = (minBox - ray.origin) * div;
vec3 tMax = (maxBox - ray.origin) * div;
float tmin = max(max(tMin.x, tMin.y), tMin.z);
float tmax = min(min(tMax.x, tMax.y), tMax.z);
if (tmin <= tmax)
{
if (tmin < minDist)
{
distance = tmin;
return true;
}
else
return false;
}
else
return false;
}
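// Iterative KD-tree traversal with an explicit stack: leaves test their triangle range, interior nodes
// push both children, and nodes whose entry distance already exceeds minDist are culled.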
__device__ ObjectIntersection RayKDTreeTraversal(KDTreeNode* nodes, int* tna, Ray ray, Triangle* triangles, vec3 position)
{
	int currentid = 0, leftid = 0, rightid = 0;
	bool isHit = false;
	float minDist = INF;
	vec3 normal = vec3(0);
	int materialID = -1;
DeviceStack<int> treestack;
treestack.push(0);
float distance = -1.0f;
vec3 point;
while (!treestack.empty())
{
currentid = treestack.pop();
//test node intersection
if (KDRayTraversal(&nodes[currentid], ray, minDist, distance, position))
{
leftid = nodes[currentid].leftChild;
rightid = nodes[currentid].rightChild;
//// leaf node
if (leftid == -1)
{
for (int i = nodes[currentid].triangleIndex; i < nodes[currentid].triangleIndex + nodes[currentid].triangleNumber; i++)
{
ObjectIntersection intersection = triangles[tna[i]].Intersect(ray);
if (intersection.hit && intersection.t < minDist)
{
minDist = intersection.t;
isHit = true;
normal = intersection.normal;
materialID = intersection.materialID;
}
}
continue;
}
// middle node
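			// Both children end up on the stack, so the ordering below is only a front-to-back heuristic;
			// the minDist test above keeps the result correct either way.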
if (leftid != -1)
{
point = ray.origin + ray.direction * distance;
if (nodes[currentid].splitAxis == 0)
{
if (point.x < nodes[currentid].nodeAABB.bounds[0].x + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
else if (nodes[currentid].splitAxis == 1)
{
if (point.y < nodes[currentid].nodeAABB.bounds[0].y + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
else if (nodes[currentid].splitAxis == 2)
{
if (point.z < nodes[currentid].nodeAABB.bounds[0].z + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
}
}
}
return ObjectIntersection(isHit, minDist, normal, materialID);
}
struct MaxX
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].x;
}
};
struct MaxY
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].y;
}
};
struct MaxZ
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].z;
}
};
struct MinX
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].x;
}
};
struct MinY
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].y;
}
};
struct MinZ
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].z;
}
};
struct KDTree
{
KDTree(){}
KDTree(Triangle* tri, int n)
{
h_Triangles = tri;
nTriangle = n;
rootAABB = AABB(h_Triangles, nTriangle);
printf("Root AABB Size : Min : %f %f %f | Max : %f %f %f\n", rootAABB.bounds[0].x, rootAABB.bounds[0].y, rootAABB.bounds[0].z, rootAABB.bounds[1].x, rootAABB.bounds[1].y, rootAABB.bounds[1].z);
}
~KDTree() { freeMemory(); }
void Build()
{
int blocksize = (nTriangle + 255) / 256;
allocateMemory();
cout << "memcpy on gpu" << endl;
// calculate AABB
CreateAABB << <blocksize, 256 >> > (nTriangle, d_Triangles, d_AABB);
MidSplit();
SAHSplit();
cout << "gpu kdtree debug info:" << endl;
cout << nodes.size() << endl;
cout << triangleNodeAssociation.size() << endl;
}
AABB rootAABB;
int nTriangle;
Triangle* d_Triangles;
Triangle* h_Triangles;
AABB* d_AABB;
DeviceVector<KDTreeNode> nodes;
DeviceVector<int> triangleNodeAssociation;
DeviceVector<int> triangleNodeAssociationHelper;
DeviceVector<int> activeList;
DeviceVector<int> nextList;
DeviceVector<int> smallList;
private:
void allocateMemory()
{
gpuErrorCheck(hipMalloc((void**)&d_Triangles, sizeof(Triangle)*nTriangle));
gpuErrorCheck(hipMalloc((void**)&d_AABB, sizeof(AABB)*nTriangle));
gpuErrorCheck(hipMemcpy(d_Triangles, h_Triangles, sizeof(Triangle)*nTriangle, hipMemcpyHostToDevice));
nodes.allocateMemory(nTriangle / 3);
triangleNodeAssociation.allocateMemory(nTriangle * 30);
triangleNodeAssociationHelper.allocateMemory(nTriangle * 10);
activeList.allocateMemory(nTriangle / 3);
nextList.allocateMemory(nTriangle / 3);
smallList.allocateMemory(nTriangle / 3);
}
void freeMemory()
{
printf("KD Tree Free\n");
gpuErrorCheck(hipFree(d_Triangles));
gpuErrorCheck(hipFree(d_AABB));
}
AABB CalculateRootAABB()
{
thrust::device_ptr<AABB> thrustPtr(d_AABB);
float maxx = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxX(), 0, thrust::maximum<float>());
float maxy = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxY(), 0, thrust::maximum<float>());
float maxz = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxZ(), 0, thrust::maximum<float>());
float minx = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinX(), 0, thrust::minimum<float>());
float miny = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinY(), 0, thrust::minimum<float>());
float minz = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinZ(), 0, thrust::minimum<float>());
gpuErrorCheck(hipDeviceSynchronize());
AABB tmp;
tmp.bounds[0] = vec3(minx, miny, minz);
tmp.bounds[1] = vec3(maxx, maxy, maxz);
return tmp;
}
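	// Breadth-first median-split phase: keeps splitting the nodes in activeList until every node is
	// small enough for the SAH phase (or hits the depth limit).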
void MidSplit()
{
InitRoot << <1, 1 >> > (nTriangle, nodes.data, nodes.d_ptr, activeList.data, activeList.d_ptr, nextList.d_ptr, smallList.d_ptr, triangleNodeAssociation.d_ptr, rootAABB);
gpuErrorCheck(hipDeviceSynchronize());
CopyTriangle << <(nTriangle + 255) / 256, 256 >> > (triangleNodeAssociation.data, nTriangle);
gpuErrorCheck(hipDeviceSynchronize());
while (!activeList.h_empty())
{
int base = nodes.size() - 1;
int startnode = nodes.size();
int start = triangleNodeAssociation.size();
triangleNodeAssociationHelper.h_clear();
MidSplitNode << <(activeList.size() + 255) / 256, 256 >> > (d_Triangles, d_AABB, nTriangle,
nodes.data,
nodes.d_ptr,
activeList.data,
activeList.d_ptr,
nextList.data,
nextList.d_ptr,
smallList.data,
smallList.d_ptr,
triangleNodeAssociation.data,
triangleNodeAssociation.d_ptr,
triangleNodeAssociationHelper.data,
triangleNodeAssociationHelper.d_ptr,
start);
gpuErrorCheck(hipDeviceSynchronize());
int end = triangleNodeAssociation.size();
int endnode = nodes.size() - 1;
int noftna = end - start;
thrust::sort_by_key(triangleNodeAssociationHelper.thrustPtr, triangleNodeAssociationHelper.thrustPtr + noftna, triangleNodeAssociation.thrustPtr + start);
gpuErrorCheck(hipDeviceSynchronize());
// calculate triangleIndex
CalculateTriangleIndex << <1, 1 >> > (startnode, endnode, base, nodes.data);
gpuErrorCheck(hipDeviceSynchronize());
// switch aciveList and nextList
//cout<<"nextlist size:"<<nextList.size()<<" tnasize="<<noftna<<endl;
gpuErrorCheck(hipMemcpy(activeList.data, nextList.data, sizeof(int)*nextList.size(), hipMemcpyDeviceToDevice));
gpuErrorCheck(hipMemcpy(activeList.d_ptr, nextList.d_ptr, sizeof(unsigned int), hipMemcpyDeviceToDevice));
nextList.h_clear();
triangleNodeAssociationHelper.h_clear();
gpuErrorCheck(hipDeviceSynchronize());
}
}
void SAHSplit()
{
{
while (!smallList.h_empty())
{
int base = nodes.size() - 1;
int startnode = nodes.size();
int start = triangleNodeAssociation.size();
triangleNodeAssociationHelper.h_clear();
SAHSplitNode << <(smallList.size() + 255) / 256, 256 >> > (d_Triangles, d_AABB, nTriangle,
nodes.data,
nodes.d_ptr,
smallList.data,
smallList.d_ptr,
nextList.data,
nextList.d_ptr,
triangleNodeAssociation.data,
triangleNodeAssociation.d_ptr,
triangleNodeAssociationHelper.data,
triangleNodeAssociationHelper.d_ptr,
start);
gpuErrorCheck(hipDeviceSynchronize());
int end = triangleNodeAssociation.size();
int endnode = nodes.size() - 1;
int noftna = end - start;
thrust::sort_by_key(triangleNodeAssociationHelper.thrustPtr, triangleNodeAssociationHelper.thrustPtr + noftna, triangleNodeAssociation.thrustPtr + start);
gpuErrorCheck(hipDeviceSynchronize());
// calculate triangleIndex
CalculateTriangleIndex << <1, 1 >> > (startnode, endnode, base, nodes.data);
gpuErrorCheck(hipDeviceSynchronize());
// switch aciveList and nextList
//cout<<"nextlist size:"<<nextList.size()<<" tnasize="<<noftna<<endl;
gpuErrorCheck(hipMemcpy(smallList.data, nextList.data, sizeof(int)*nextList.size(), hipMemcpyDeviceToDevice));
gpuErrorCheck(hipMemcpy(smallList.d_ptr, nextList.d_ptr, sizeof(unsigned int), hipMemcpyDeviceToDevice));
nextList.h_clear();
triangleNodeAssociationHelper.h_clear();
gpuErrorCheck(hipDeviceSynchronize());
}
}
}
};
#pragma endregion KDTree
struct Mesh
{
__host__ __device__ Mesh() {}
__host__ Mesh(vec3 position, const char* fileName = "", Material material = Material())
{
this->position = position;
std::string mtlBasePath;
std::string inputFile = fileName;
unsigned long pos = inputFile.find_last_of("/");
mtlBasePath = inputFile.substr(0, pos + 1);
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> obj_shapes;
std::vector<tinyobj::material_t> obj_materials;
std::vector<int> materialIDs;
printf("Loading %s...\n", fileName);
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &obj_shapes, &obj_materials, &err, inputFile.c_str(), mtlBasePath.c_str());
if (!err.empty())
std::cerr << err << std::endl;
if (!ret) exit(1);
for (auto & obj_material : obj_materials)
{
std::string texturePath = "";
vec3 diffuseColor = vec3(obj_material.diffuse[0], obj_material.diffuse[1], obj_material.diffuse[2]);
vec3 emissionColor = vec3(obj_material.emission[0], obj_material.emission[1], obj_material.emission[2]);
materialIDs.push_back(materials.size());
materials.push_back(Material(material.type, diffuseColor, emissionColor));
}
long shapeSize, faceSize;
shapeSize = obj_shapes.size();
std::vector<Triangle>* triangles = new std::vector<Triangle>;
for (int i = 0; i < shapeSize; i++)
{
size_t index_offset = 0;
faceSize = obj_shapes[i].mesh.num_face_vertices.size();
for (size_t f = 0; f < faceSize; f++)
{
size_t fnum = obj_shapes[i].mesh.num_face_vertices[f];
vec3 pos[3];
vec3 nor[3];
for (int k = 0; k < 3; k++)
{
tinyobj::index_t idx = obj_shapes[i].mesh.indices[index_offset + k];
pos[k] = vec3(
attrib.vertices[3 * idx.vertex_index + 0],
attrib.vertices[3 * idx.vertex_index + 1],
attrib.vertices[3 * idx.vertex_index + 2]
);
nor[k] = vec3(
attrib.normals[3 * idx.normal_index + 0],
attrib.normals[3 * idx.normal_index + 1],
attrib.normals[3 * idx.normal_index + 2]
);
nor[k] = normalize(nor[k]);
}
Triangle triangle;
if (obj_shapes[i].mesh.material_ids[f] < materialIDs.size())
{
triangle = Triangle(pos[0], pos[1], pos[2], nor[0], nor[1], nor[2], materialIDs[obj_shapes[i].mesh.material_ids[f]]);
}
else
{
triangle = Triangle(pos[0], pos[1], pos[2], nor[0], nor[1], nor[2], 0);
}
triangles->push_back(triangle);
index_offset += fnum;
}
}
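		// The std::vector is intentionally never freed so that triangles->data() stays valid for the mesh lifetime.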
this->count = triangles->size();
this->triangles = triangles->data();
}
vec3 position;
Triangle* triangles;
int count;
KDTree* tree;
KDTreeNode* nodes;
int* tna;
__device__ ObjectIntersection Intersect(Ray ray)
{
#if ENABLE_KDTREE
		ObjectIntersection intersection = RayKDTreeTraversal(nodes, tna, ray, triangles, position);
		return intersection;
#else
float tNear = INFINITY;
ObjectIntersection intersection = ObjectIntersection();
for (int i = 0; i < count; i++)
{
ObjectIntersection temp = triangles[i].Intersect(ray);
if (temp.hit && temp.t < tNear)
{
tNear = temp.t;
intersection = temp;
}
}
return intersection;
#endif
}
};
#pragma endregion Structs
#pragma region Kernels
__device__ ObjectIntersection Intersect(Ray ray, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles)
{
ObjectIntersection intersection = ObjectIntersection();
ObjectIntersection temp = ObjectIntersection();
for (int i = 0; i < spheres.size; i++)
{
temp = spheres.array[i].Intersect(ray);
if (temp.hit)
{
if (intersection.t == 0 || temp.t < intersection.t)
{
intersection = temp;
}
}
}
	for (int i = 0; i < triangles.size; i++)
	{
		temp = triangles.array[i].Intersect(ray);
		if (temp.hit)
		{
			if (intersection.t == 0 || temp.t < intersection.t)
			{
				intersection = temp;
			}
		}
	}
return intersection;
}
__device__ Ray GetReflectedRay(Ray ray, vec3 hitPoint, glm::vec3 normal, vec3 &mask, Material material, hiprandState_t* randState)
{
switch (material.type)
{
case DIFF:
{
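		// Ideal diffuse: cosine-weighted hemisphere sample around the shading normal.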
vec3 nl = dot(normal, ray.direction) < EPSILON ? normal : normal * -1.0f;
float r1 = two_pi<float>() * hiprand_uniform(randState);
float r2 = hiprand_uniform(randState);
float r2s = sqrt(r2);
vec3 w = nl;
vec3 u;
if (fabs(w.x) > 0.1f)
u = normalize(cross(vec3(0.0f, 1.0f, 0.0f), w));
else
u = normalize(cross(vec3(1.0f, 0.0f, 0.0f), w));
vec3 v = cross(w, u);
vec3 reflected = normalize((u * __cosf(r1) * r2s + v * __sinf(r1) * r2s + w * sqrt(1 - r2)));
mask *= material.color;
return Ray(hitPoint, reflected);
}
case GLOSS:
{
float phi = 2 * pi<float>() * hiprand_uniform(randState);
float r2 = hiprand_uniform(randState);
float phongExponent = 20;
float cosTheta = __powf(1 - r2, 1.0f / (phongExponent + 1));
		float sinTheta = sqrt(1.0f - cosTheta * cosTheta); // sine from the sampled cosine (Phong lobe)
vec3 w = normalize(ray.direction - normal * 2.0f * dot(normal, ray.direction));
vec3 u = normalize(cross((fabs(w.x) > .1 ? vec3(0, 1, 0) : vec3(1, 0, 0)), w));
vec3 v = cross(w, u);
vec3 reflected = normalize(u * __cosf(phi) * sinTheta + v * __sinf(phi) * sinTheta + w * cosTheta);
mask *= material.color;
return Ray(hitPoint, reflected);
}
case TRANS:
{
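		// Dielectric: refract or reflect using Schlick's Fresnel approximation; a negative cos2t means total internal reflection.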
vec3 nl = dot(normal, ray.direction) < EPSILON ? normal : normal * -1.0f;
vec3 reflection = ray.direction - normal * 2.0f * dot(normal, ray.direction);
bool into = dot(normal, nl) > EPSILON;
float nc = 1.0f;
float nt = 1.5f;
float nnt = into ? nc / nt : nt / nc;
float Re, RP, TP, Tr;
vec3 tdir = vec3(0.0f, 0.0f, 0.0f);
float ddn = dot(ray.direction, nl);
float cos2t = 1.0f - nnt * nnt * (1.0f - ddn * ddn);
if (cos2t < EPSILON) return Ray(hitPoint, reflection);
if (into)
tdir = normalize((ray.direction * nnt - normal * (ddn * nnt + sqrt(cos2t))));
else
tdir = normalize((ray.direction * nnt + normal * (ddn * nnt + sqrt(cos2t))));
float a = nt - nc;
float b = nt + nc;
float R0 = a * a / (b * b);
float c;
if (into)
c = 1 + ddn;
else
c = 1 - dot(tdir, normal);
Re = R0 + (1 - R0) * c * c * c * c * c;
Tr = 1 - Re;
float P = .25 + .5 * Re;
RP = Re / P;
TP = Tr / (1 - P);
if (hiprand_uniform(randState) < P)
{
mask *= (RP);
return Ray(hitPoint, reflection);
}
mask *= (TP);
return Ray(hitPoint, tdir);
}
case SPEC:
{
vec3 reflected = ray.direction - normal * 2.0f * dot(normal, ray.direction);
mask *= material.color;
return Ray(hitPoint, reflected);
}
	}
	// Fallback for unhandled material types: plain mirror reflection without tinting the mask.
	return Ray(hitPoint, ray.direction - normal * 2.0f * dot(normal, ray.direction));
}
// Path Tracing + Photon Map
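// Iterative path tracer: on a miss it samples the equirectangular HDR environment map, otherwise it
// accumulates emission, terminates paths with Russian roulette on the throughput mask, and continues
// with a material-dependent bounce.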
__device__ vec3 TraceRay(Ray ray, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, bool directLighting, float directLightingConstant, hiprandState_t* randState)
{
vec3 resultColor = vec3(0);
vec3 mask = vec3(1);
for (int depth = 0; depth < MAX_DEPTH; depth++)
{
ObjectIntersection intersection = Intersect(ray, spheres, triangles);
if (intersection.hit == 0)
{
float longlatX = atan2(ray.direction.x, ray.direction.z);
longlatX = longlatX < EPSILON ? longlatX + two_pi<float>() : longlatX;
float longlatY = acos(-ray.direction.y);
float u = longlatX / two_pi<float>();
float v = longlatY / pi<float>();
int u2 = (int)(u * HDRWidth);
int tvec = (int)(v * HDRHeight);
int HDRtexelidx = u2 + tvec * HDRWidth;
float4 HDRcol = tex1Dfetch(HDRtexture, HDRtexelidx);
vec3 HDRcol2 = vec3(HDRcol.x, HDRcol.y, HDRcol.z);
return resultColor + (mask * HDRcol2);
}
vec3 hitPoint = ray.origin + ray.direction * intersection.t;
Material hitMaterial = materials.array[intersection.materialID];
vec3 emission = hitMaterial.emission;
float maxReflection = max(max(mask.r, mask.g), mask.b);
if (hiprand_uniform(randState) > maxReflection)
break;
resultColor += mask * emission;
ray = GetReflectedRay(ray, hitPoint, intersection.normal, mask, hitMaterial, randState);
mask *= 1 / maxReflection;
}
return resultColor;
}
// Real time + Photon Mapping Kernel
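// One thread per pixel of the current (loopX, loopY) tile; the new radiance estimate is averaged into
// the surface so the image refines progressively across frames.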
__global__ void PathKernel(Camera* camera, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, int loopX, int loopY, bool dof, bool directLighting, float directLightingConstant, int frame, hipSurfaceObject_t surface)
{
int width = camera->width;
int height = camera->height;
int x = gridDim.x * blockDim.x * loopX + blockIdx.x * blockDim.x + threadIdx.x;
int y = gridDim.y * blockDim.y * loopY + blockIdx.y * blockDim.y + threadIdx.y;
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int i = y * width + x;
	if (x >= width || y >= height) return;
hiprandState_t randState;
float4 originColor;
surf2Dread(&originColor, surface, x * sizeof(float4), y);
vec3 resultColor = vec3(0, 0, 0);
hiprand_init(WangHash(threadId) + WangHash(frame), 0, 0, &randState);
Ray ray = camera->GetRay(&randState, x, y, dof);
vec3 color = TraceRay(ray, spheres, triangles, materials, directLighting, directLightingConstant, &randState);
resultColor = (vec3(originColor.x, originColor.y, originColor.z) * (float)(frame - 1) + color) / (float)frame;
surf2Dwrite(make_float4(resultColor.r, resultColor.g, resultColor.b, 1.0f), surface, x * sizeof(float4), y);
}
// Rendering loop: launches PathKernel once per (i, j) screen tile
void TracingLoop(Camera* camera, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, int frame, bool dof, bool directLighting, float directLightingConstant, hipSurfaceObject_t surface)
{
hipDeviceSetLimit(hipLimitMallocHeapSize, 5000000000 * sizeof(float));
for (int i = 0; i < TRACE_OUTER_LOOP_X; i++)
{
for (int j = 0; j < TRACE_OUTER_LOOP_Y; j++)
{
PathKernel << <grid, block >> > (camera, spheres, triangles, materials, i, j, dof, directLighting, directLightingConstant, frame, surface);
gpuErrorCheck(hipDeviceSynchronize());
}
}
}
void RenderRealTime(hipSurfaceObject_t surface, bool dof, bool photon, bool directLighting, int frame)
{
int width = camera->width;
int height = camera->height;
hipEvent_t start, stop;
gpuErrorCheck(hipEventCreate(&start));
gpuErrorCheck(hipEventRecord(start, 0));
Camera* cudaCamera;
gpuErrorCheck(hipMalloc(&cudaCamera, sizeof(Camera)));
gpuErrorCheck(hipMemcpy(cudaCamera, camera, sizeof(Camera), hipMemcpyHostToDevice));
thrust::device_vector<Sphere> cudaSpheres(spheres);
thrust::device_vector<Material> cudaMaterials(materials);
thrust::host_vector<Triangle> triangles;
thrust::device_vector<Triangle> cudaTriangles;
	for (auto & mesh : meshes)
	{
		for (int i = 0; i < mesh.count; i++)
		{
			// copy the triangle so the host mesh is not translated again on every frame
			Triangle triangle = mesh.triangles[i];
			for (auto & pos : triangle.pos)
			{
				pos += mesh.position;
			}
			triangles.push_back(triangle);
		}
	}
cudaTriangles = triangles;
// int meshCount = sizeof(meshes) / sizeof(Mesh);
// Mesh* cudaMeshes;
// std::vector<Mesh> meshVector;
// std::vector<Triangle*> triangleVector;
// for (int i = 0; i < meshCount; i++)
// {
// Mesh currentMesh = meshes[i];
// Mesh cudaMesh = currentMesh;
// Triangle* cudaTriangles;
// gpuErrorCheck(hipMalloc(&cudaTriangles, sizeof(Triangle) * currentMesh.count));
// gpuErrorCheck(hipMemcpy(cudaTriangles, currentMesh.triangles, sizeof(Triangle) * currentMesh.count, hipMemcpyHostToDevice));
//
//#if ENABLE_KDTREE
// cudaMesh.nodes = currentMesh.tree->nodes.data;
// cudaMesh.tna = currentMesh.tree->triangleNodeAssociation.data;
//#endif
// cudaMesh.triangles = cudaTriangles;
// meshVector.push_back(cudaMesh);
// triangleVector.push_back(cudaTriangles);
// }
// gpuErrorCheck(hipMalloc(&cudaMeshes, sizeof(Mesh) * meshCount));
// gpuErrorCheck(hipMemcpy(cudaMeshes, meshVector.data(), sizeof(Mesh) * meshCount, hipMemcpyHostToDevice));
gpuErrorCheck(hipEventCreate(&stop));
gpuErrorCheck(hipEventRecord(stop, 0));
gpuErrorCheck(hipEventSynchronize(stop));
gpuErrorCheck(hipEventElapsedTime(&memoryAllocTime, start, stop));
gpuErrorCheck(hipEventDestroy(start));
gpuErrorCheck(hipEventDestroy(stop));
block = dim3(16, 9);
	grid.x = (unsigned int)ceilf(ceilf(width / (float)TRACE_OUTER_LOOP_X) / block.x);
	grid.y = (unsigned int)ceilf(ceilf(height / (float)TRACE_OUTER_LOOP_Y) / block.y);
gpuErrorCheck(hipEventCreate(&start));
gpuErrorCheck(hipEventRecord(start, 0));
TracingLoop(cudaCamera, ConvertToKernel(cudaSpheres), ConvertToKernel(cudaTriangles), ConvertToKernel(cudaMaterials), frame, dof, directLighting, directLightingConstant, surface);
gpuErrorCheck(hipDeviceSynchronize());
gpuErrorCheck(hipEventCreate(&stop));
gpuErrorCheck(hipEventRecord(stop, 0));
gpuErrorCheck(hipEventSynchronize(stop));
gpuErrorCheck(hipEventElapsedTime(&renderingTime, start, stop));
gpuErrorCheck(hipEventDestroy(start));
gpuErrorCheck(hipEventDestroy(stop));
gpuErrorCheck(hipFree(cudaCamera));
}
#pragma endregion Kernels
#pragma region Opengl Callbacks
void Keyboard(unsigned char key, int x, int y)
{
keyState[key] = true;
mousePos[0] = x;
mousePos[1] = y;
if (IsKeyDown('r'))
{
enableDof = !enableDof;
cudaDirty = true;
}
if (IsKeyDown('b'))
{
enablePhoton = !enablePhoton;
cudaDirty = true;
}
if (IsKeyDown('q'))
{
enableSaveImage = true;
frame = 1;
cudaDirty = false;
cudaToggle = true;
}
if (IsKeyDown('f'))
{
cudaToggle = !cudaToggle;
frame = 1;
cudaDirty = false;
}
if (IsKeyDown('n'))
{
enableDrawNormal = !enableDrawNormal;
cudaToggle = false;
}
if (IsKeyDown('k'))
{
enableDrawKDTree = !enableDrawKDTree;
cudaToggle = false;
}
if (IsKeyDown('l'))
{
enableDirectLighting = !enableDirectLighting;
cudaDirty = true;
}
if (IsKeyDown('t'))
{
camera->aperture += 0.1f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('g'))
{
camera->aperture -= 0.1f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('y'))
{
camera->focalDistance += 0.5f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('h'))
{
camera->focalDistance -= 0.5f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('p'))
{
printf("Camera Position : %f %f %f\n", camera->position.x, camera->position.y, camera->position.z);
printf("Pitch Yaw : %f %f\n", camera->pitch, camera->yaw);
}
if (IsKeyDown('u'))
{
enableGUI = !enableGUI;
}
ImGuiIO& io = ImGui::GetIO();
io.AddInputCharacter(key);
glutPostRedisplay();
}
void KeyboardUp(unsigned char key, int x, int y)
{
keyState[key] = false;
mousePos[0] = x;
mousePos[1] = y;
glutPostRedisplay();
}
void Special(int key, int x, int y)
{
ImGuiIO& io = ImGui::GetIO();
io.AddInputCharacter(key);
glutPostRedisplay();
}
void SpecialUp(int key, int x, int y)
{
glutPostRedisplay();
}
void Mouse(int button, int state, int x, int y)
{
mousePos[0] = x;
mousePos[1] = y;
mouseState[button] = !state;
ImGuiIO& io = ImGui::GetIO();
io.MousePos = ImVec2(float(x), float(y));
if (state == GLUT_DOWN && (button == GLUT_LEFT_BUTTON))
io.MouseDown[0] = true;
else
io.MouseDown[0] = false;
if (state == GLUT_DOWN && (button == GLUT_RIGHT_BUTTON))
io.MouseDown[1] = true;
else
io.MouseDown[1] = false;
glutPostRedisplay();
}
void MouseWheel(int button, int dir, int x, int y)
{
if (dir > 0)
{
camera->fov++;
cudaDirty = true;
}
else
{
camera->fov--;
cudaDirty = true;
}
glutPostRedisplay();
}
void Motion(int x, int y)
{
mousePos[0] = x;
mousePos[1] = y;
ImGuiIO& io = ImGui::GetIO();
io.MousePos = ImVec2(float(x), float(y));
glutPostRedisplay();
}
void Reshape(int w, int h)
{
camera->UpdateScreen(w, h);
}
void Idle()
{
int timeSinceStart = glutGet(GLUT_ELAPSED_TIME);
deltaTime = (timeSinceStart - oldTimeSinceStart) * 0.001f;
oldTimeSinceStart = timeSinceStart;
glutPostRedisplay();
}
void Display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
camera->UpdateCamera(deltaTime);
// OpenGL Draw
if (cudaToggle)
{
int width = camera->width;
int height = camera->height;
glColor3f(1, 1, 1);
glDisable(GL_LIGHTING);
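		// CUDA/OpenGL interop: register and map the GL texture as a CUDA surface, let the path-tracing
		// kernels write into it, then draw it as a fullscreen quad.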
hipGraphicsGLRegisterImage(&viewResource, viewGLTexture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsWriteDiscard);
hipGraphicsMapResources(1, &viewResource);
hipGraphicsSubResourceGetMappedArray(&viewArray, viewResource, 0, 0);
hipResourceDesc viewCudaArrayResourceDesc;
{
viewCudaArrayResourceDesc.resType = hipResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewArray;
}
hipSurfaceObject_t viewCudaSurfaceObject;
gpuErrorCheck(hipCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
{
if (cudaDirty)
{
frame = 0;
cudaDirty = false;
}
RenderRealTime(viewCudaSurfaceObject, enableDof, enablePhoton, enableDirectLighting, ++frame);
}
gpuErrorCheck(hipDestroySurfaceObject(viewCudaSurfaceObject));
		gpuErrorCheck(hipGraphicsUnmapResources(1, &viewResource));
		gpuErrorCheck(hipGraphicsUnregisterResource(viewResource));
		hipStreamSynchronize(0);
glLoadIdentity();
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, 0, height, -1000, 1000);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0, 1); glVertex2f(0, 0);
glTexCoord2f(1, 1); glVertex2f(width, 0);
glTexCoord2f(1, 0); glVertex2f(width, height);
glTexCoord2f(0, 0); glVertex2f(0, height);
}
glEnd();
}
if (enableSaveImage && frame >= imageSaveSamples)
{
enableSaveImage = false;
cudaToggle = false;
cudaDirty = false;
isSavingImage = false;
frame = 1;
GLubyte *pixels = new GLubyte[3 * width*height];
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, FI_RGBA_RED_MASK, FI_RGBA_GREEN_MASK, FI_RGBA_BLUE_MASK, false);
SwapRedBlue32(image);
FreeImage_Save(FIF_PNG, image, "Result.png", 0);
FreeImage_Unload(image);
			delete[] pixels;
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
glEnable(GL_LIGHTING);
}
else
{
// Draw Opengl
{
for (int n = 0; n < spheres.size(); n++)
{
glPushMatrix();
glTranslatef(spheres[n].position.x, spheres[n].position.y, spheres[n].position.z);
glColor3fv(value_ptr(materials[spheres[n].materialID].color));
int i, j;
int lats = 50;
int longs = 50;
float radius = spheres[n].radius;
for (i = 0; i <= lats; i++)
{
float lat0 = pi<float>() * (-float(0.5) + (float) (i - 1) / lats);
float z0 = radius * sin(lat0);
float zr0 = radius * cos(lat0);
float lat1 = pi<float>() * (-float(0.5) + (float) i / lats);
float z1 = radius * sin(lat1);
float zr1 = radius * cos(lat1);
glBegin(GL_QUAD_STRIP);
for (j = 0; j <= longs; j++)
{
float lng = 2 * pi<float>() * (float) (j - 1) / longs;
float x = cos(lng);
float y = sin(lng);
glNormal3f(x * zr1, y * zr1, z1);
glVertex3f(x * zr1, y * zr1, z1);
glNormal3f(x * zr0, y * zr0, z0);
glVertex3f(x * zr0, y * zr0, z0);
}
glEnd();
}
glPopMatrix();
}
for (int n = 0; n < meshes.size(); n++)
{
glPushMatrix();
glTranslatef(meshes[n].position.x, meshes[n].position.y, meshes[n].position.z);
Triangle* triangles = meshes[n].triangles;
for (int i = 0; i < meshes[n].count; i++)
{
glColor3fv(value_ptr(materials[triangles[i].materialID].color));
vec3 p0 = triangles[i].pos[0];
vec3 p1 = triangles[i].pos[1];
vec3 p2 = triangles[i].pos[2];
vec3 normal = cross((p2 - p0), (p1 - p0));
normal = normalize(normal);
glBegin(GL_TRIANGLE_STRIP);
glNormal3fv(value_ptr(normal));
glVertex3fv(value_ptr(p0));
glVertex3fv(value_ptr(p1));
glVertex3fv(value_ptr(p2));
glEnd();
if (enableDrawNormal)
{
glLineWidth(1.0f);
glColor3f(1.0f, 1.0f, 1.0f);
glBegin(GL_LINES);
glVertex3fv(value_ptr(triangles[i].pos[0]));
glVertex3fv(value_ptr(triangles[i].nor[0] + triangles[i].pos[0]));
glVertex3fv(value_ptr(triangles[i].pos[1]));
glVertex3fv(value_ptr(triangles[i].nor[1] + triangles[i].pos[1]));
glVertex3fv(value_ptr(triangles[i].pos[2]));
glVertex3fv(value_ptr(triangles[i].nor[2] + triangles[i].pos[2]));
glEnd();
}
}
if (enableDrawKDTree)
{
glDisable(GL_LIGHTING);
int nodeSize = meshes[n].tree->nodes.size();
glLineWidth(1.0f);
KDTreeNode* nodes = new KDTreeNode[nodeSize];
meshes[n].tree->nodes.CopyToHost(nodes);
for (int i = 0; i < meshes[n].tree->nodes.size(); i++)
{
if (nodes[i].depth > KDTREE_MAX_DEPTH)
printf("WHAT %d\n", nodes[i].depth);
AABB box = nodes[i].nodeAABB;
vec3 corner[8];
corner[0] = { box.bounds[0].x, box.bounds[0].y, box.bounds[0].z };
corner[1] = { box.bounds[1].x, box.bounds[0].y, box.bounds[0].z };
corner[2] = { box.bounds[1].x, box.bounds[0].y, box.bounds[1].z };
corner[3] = { box.bounds[0].x, box.bounds[0].y, box.bounds[1].z };
corner[4] = { box.bounds[0].x, box.bounds[1].y, box.bounds[0].z };
corner[5] = { box.bounds[1].x, box.bounds[1].y, box.bounds[0].z };
corner[6] = { box.bounds[1].x, box.bounds[1].y, box.bounds[1].z };
corner[7] = { box.bounds[0].x, box.bounds[1].y, box.bounds[1].z };
glColor3f(1.0f, 1 - (i / float(nodeSize)), 0.0f);
glLineWidth(i / float(nodeSize));
glBegin(GL_LINES);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glEnd();
}
delete[] nodes;
glEnable(GL_LIGHTING);
}
glPopMatrix();
}
}
}
int width = camera->width;
int height = camera->height;
ImGui_ImplGLUT_NewFrame(width, height);
// UI
if (enableGUI)
{
ImGui::Begin("Cuda Tracer", nullptr, ImVec2(0,0), -1.0f, ImGuiWindowFlags_AlwaysAutoResize);
ImGui::SetNextWindowPos(ImVec2(0, 0), ImGuiSetCond_Once);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
if (cudaToggle)
{
ImGui::Text("Current Frame : %d", frame);
if (isSavingImage)
{
ImGui::PushItemFlag(ImGuiItemFlags_Disabled, true);
ImGui::PushStyleVar(ImGuiStyleVar_Alpha, ImGui::GetStyle().Alpha * 0.5f);
}
if (ImGui::Button("Save Image"))
{
GLubyte *pixels = new GLubyte[3 * width*height];
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, FI_RGBA_RED_MASK, FI_RGBA_GREEN_MASK, FI_RGBA_BLUE_MASK, false);
SwapRedBlue32(image);
stringstream ss;
ss << "Result_" << frame << ".png";
FreeImage_Save(FIF_PNG, image, ss.str().c_str(), 0);
FreeImage_Unload(image);
				delete[] pixels;
}
ImGui::Text("Memory Allocation Time : %f ms", memoryAllocTime);
ImGui::Text("Rendering time : %f ms", renderingTime);
ImGui::Text("Total Time : %f ms", memoryAllocTime + renderingTime);
if (ImGui::Checkbox("Enable Dof", &enableDof))
cudaDirty = true;
if (enableDof)
{
if (ImGui::SliderFloat("Focal Distance", &(camera->focalDistance), EPSILON, 500))
cudaDirty = true;
if (ImGui::SliderFloat("Aperture", &(camera->aperture), EPSILON, 50))
cudaDirty = true;
}
//if (ImGui::Checkbox("Enable Direct Lighting", &enableDirectLighting))
// cudaDirty = true;
//if (ImGui::Checkbox("Enable Photon Mapping", &enablePhoton))
// cudaDirty = true;
//if (ImGui::SliderFloat("Direct Lighting Weight", &directLightingConstant, EPSILON, 1000.0f))
// cudaDirty = true;
if (isSavingImage)
{
ImGui::PopItemFlag();
ImGui::PopStyleVar();
}
}
else
{
ImGui::InputInt("Image Samples", &imageSaveSamples, 1, 1000);
ImGui::SameLine();
if (ImGui::Button("Save Image"))
{
enableSaveImage = true;
frame = 1;
cudaDirty = false;
cudaToggle = true;
isSavingImage = true;
}
if (ImGui::Checkbox("Draw Normal", &enableDrawNormal))
cudaDirty = true;
if (ImGui::Checkbox("Draw Debug KDTree AABBox", &enableDrawKDTree))
cudaDirty = true;
}
if (!isSavingImage)
{
			int sphereCount = (int)spheres.size();
			int meshCount = (int)meshes.size();
if (ImGui::CollapsingHeader("Objects"))
{
ImGui::Text("Spheres : %d", sphereCount);
ImGui::Text("Meshes : %d", meshCount);
ImGui::SliderInt("Current Object", &objectIndex, 0, sphereCount + meshCount - 1);
if (objectIndex < sphereCount)
{
if (ImGui::SliderFloat3("Position", value_ptr(spheres[objectIndex].position), -100.0f, 100.0f))
cudaDirty = true;
if (ImGui::SliderFloat("Radius", &(spheres[objectIndex].radius), EPSILON, 100))
cudaDirty = true;
//if (ImGui::ListBox("Material Type", (int*)&(spheres[objectIndex].material.type), MATERIAL_TYPE_ARRAY, IM_ARRAYSIZE(MATERIAL_TYPE_ARRAY)))
// cudaDirty = true;
//if (ImGui::SliderFloat3("Color", value_ptr(spheres[objectIndex].material.color), 0.0f, 1.0f))
// cudaDirty = true;
//if (ImGui::SliderFloat3("Emission", value_ptr(spheres[objectIndex].material.emission), 0.0f, 10.0f))
// cudaDirty = true;
}
else
{
int meshIndex = objectIndex - sphereCount;
ImGui::Text("Triangles : %d", meshes[meshIndex].count);
if (ImGui::SliderFloat3("Position", value_ptr(meshes[meshIndex].position), -100.0f, 100.0f))
cudaDirty = true;
}
}
}
ImGui::End();
ImGui::Render();
}
glutSwapBuffers();
}
#pragma endregion Opengl Callbacks
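// Entry point: initializes GLUT/GLEW and the fixed-function lighting state, sets up ImGui,
// builds the scene (camera, an emissive sphere, a mirror and a glass sphere, and the Cornell box
// mesh), creates the GL texture the tracer writes into, loads the HDR environment map, then
// registers the GLUT callbacks and enters the main loop.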
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE | GLUT_MULTISAMPLE);
glutInitWindowPosition(0, 0);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Cuda Tracer");
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "Failed to initialize GLEW()\n");
return -1;
}
GLfloat ambient[] = { 0.2f, 0.2f, 0.2f, 1.0f };
GLfloat diffuse[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat specular[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat position[] = { 0.0f, 40.0f, 0.0f, 0.0f };
glLightfv(GL_LIGHT0, GL_AMBIENT, ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, specular);
glLightfv(GL_LIGHT0, GL_POSITION, position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glMaterialfv(GL_FRONT, GL_SPECULAR, specular);
glMateriali(GL_FRONT, GL_SHININESS, 15);
glShadeModel(GL_SMOOTH);
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glClearColor(0.6, 0.65, 0.85, 0);
FreeImage_Initialise();
{
// imgui
ImGui::CreateContext();
ImGuiIO& io = ImGui::GetIO(); (void) io;
ImGui_ImplGLUT_Init();
ImGui::StyleColorsDark();
}
{
// Init Scene
camera = new Camera;
materials.push_back(Material());
spheres.push_back(Sphere(vec3(0, 45, 0), 1.0f, Material(DIFF, vec3(1), vec3(2.2f, 2.2f, 2.2f))));
spheres.push_back(Sphere(vec3(-10, 8, -10), 8, Material(SPEC)));
spheres.push_back(Sphere(vec3(-10, 8, 10), 8, Material(TRANS)));
meshes.push_back(Mesh(vec3(0, 0, 0), "Cornell.obj", Material(DIFF)));
}
{
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WIDTH, HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
}
glBindTexture(GL_TEXTURE_2D, 0);
}
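// Load the HDR environment map: FreeImage reads the file and tone-maps it (Reinhard), the pixels
// are packed into a float4 buffer, uploaded to the device and bound to HDRtexture for environment
// lookups when rays miss the scene.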
{
FREE_IMAGE_FORMAT fif = FIF_HDR;
FIBITMAP *src(nullptr);
FIBITMAP *dst(nullptr);
BYTE* bits(nullptr);
float4* cpuHDRmap;
src = FreeImage_Load(fif, HDR_FILE_NAME);
dst = FreeImage_ToneMapping(src, FITMO_REINHARD05);
bits = FreeImage_GetBits(dst);
if (bits == nullptr)
return -1;
cpuHDRmap = new float4[HDRWidth * HDRHeight];
for (int x = 0; x < HDRWidth; x++)
{
for (int y = 0; y < HDRHeight; y++)
{
RGBQUAD rgbQuad;
FreeImage_GetPixelColor(dst, x, y, &rgbQuad);
cpuHDRmap[y*HDRWidth + x].x = rgbQuad.rgbRed / 256.0f;
cpuHDRmap[y*HDRWidth + x].y = rgbQuad.rgbGreen / 256.0f;
cpuHDRmap[y*HDRWidth + x].z = rgbQuad.rgbBlue / 256.0f;
cpuHDRmap[y*HDRWidth + x].w = 1.0f;
}
}
gpuErrorCheck(hipMalloc(&cudaHDRmap, HDRWidth * HDRHeight * sizeof(float4)));
gpuErrorCheck(hipMemcpy(cudaHDRmap, cpuHDRmap, HDRWidth * HDRHeight * sizeof(float4), hipMemcpyHostToDevice));
HDRtexture.filterMode = hipFilterModeLinear;
hipChannelFormatDesc channel4desc = hipCreateChannelDesc<float4>();
hipBindTexture(NULL, &HDRtexture, cudaHDRmap, &channel4desc, HDRWidth * HDRHeight * sizeof(float4));
printf("Load HDR Map Success\n");
printf("Width : %d\nHeight : %d\n", HDRWidth, HDRHeight);
FreeImage_Unload(src);
FreeImage_Unload(dst);
delete[] cpuHDRmap;
}
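// Optionally build a KD-tree on the GPU for every loaded mesh before rendering starts.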
#if ENABLE_KDTREE
int meshCount = (int)meshes.size();
for (int i = 0; i < meshCount; i++)
{
meshes[i].tree = new KDTree(meshes[i].triangles, meshes[i].count);
meshes[i].tree->Build();
}
#endif
glutKeyboardFunc(Keyboard);
glutKeyboardUpFunc(KeyboardUp);
glutSpecialFunc(Special);
glutSpecialUpFunc(SpecialUp);
glutReshapeFunc(Reshape);
glutIdleFunc(Idle);
glutMouseWheelFunc(MouseWheel);
glutMouseFunc(Mouse);
glutPassiveMotionFunc(Motion);
glutMotionFunc(Motion);
glutDisplayFunc(Display);
glutMainLoop();
// Cleanup
hipDeviceReset();
ImGui_ImplGLUT_Shutdown();
ImGui::DestroyContext();
return 0;
} | 17a4cd3870f90f0cfa42cd7b6e8c2ba7a975c003.cu | #include <GL/glew.h>
#include <GL/freeglut.h>
#include <glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h"
#include "kernel.cuh"
#include "Input.h"
#pragma region Structs
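// Ray whose origin is nudged along its direction by EPSILON to reduce self-intersection
// ("surface acne"), unless ENABLE_SURFACE_ACNE is set.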
struct Ray
{
vec3 origin;
vec3 direction;
__host__ __device__ Ray(vec3 origin, vec3 direction)
{
this->origin = origin + direction * (ENABLE_SURFACE_ACNE ? 0 : EPSILON);
this->direction = direction;
}
};
struct Photon
{
__host__ __device__ Photon()
{
position = vec3(0, 0, 0);
normal = vec3(0, 0, 0);
power = vec3(0, 0, 0);
type = NONE;
isHit = false;
}
vec3 position;
vec3 normal;
vec3 power;
MaterialType type;
bool isHit;
};
struct Camera
{
__host__ __device__ Camera()
{
proj = glm::mat4(1.0f);
position = glm::vec3(0.0f, 46.0f, 126.0f);
fov = 70.0f;
nearPlane = 0.1f;
farPlane = 1000.0f;
moveSpeed = 25.0f;
mouseSpeed = 10.0f;
pitch = 0.0f;
yaw = 180.0f;
view = mat4(0);
proj = mat4(0);
aperture = 0;
focalDistance = 0.1f;
}
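// Builds the primary ray for pixel (x, y): jitters the sample position inside the pixel for
// anti-aliasing and, when depth of field is enabled, samples a point on the aperture disk and
// aims the ray at the focal plane (thin-lens model).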
__device__ Ray GetRay(curandState* randState, int x, int y, bool dof)
{
float jitterValueX = curand_uniform(randState) - 0.5;
float jitterValueY = curand_uniform(randState) - 0.5;
vec3 wDir = glm::normalize(-forward);
vec3 uDir = glm::normalize(cross(up, wDir));
vec3 vDir = glm::cross(wDir, -uDir);
float top = __tanf(fov * glm::pi<float>() / 360.0f);
float right = aspectRatio * top;
float bottom = -top;
float left = -right;
float imPlaneUPos = left + (right - left)*(((float)x + jitterValueX) / (float)width);
float imPlaneVPos = bottom + (top - bottom)*(((float)y + jitterValueY) / (float)height);
vec3 originDirection = imPlaneUPos * uDir + imPlaneVPos * vDir - wDir;
vec3 pointOnImagePlane = position + ((originDirection) * focalDistance);
if (dof)
{
vec3 aperturePoint = vec3(0, 0, 0);
if (aperture >= EPSILON)
{
float r1 = curand_uniform(randState);
float r2 = curand_uniform(randState);
float angle = two_pi<float>() * r1;
float distance = aperture * sqrt(r2);
float apertureX = __cosf(angle) * distance;
float apertureY = __sinf(angle) * distance;
aperturePoint = position + (wDir * apertureX) + (uDir * apertureY);
}
else
{
aperturePoint = position;
}
return Ray(aperturePoint, normalize(pointOnImagePlane - aperturePoint));
}
else
{
return Ray(position, normalize(originDirection));
}
}
void UpdateScreen(int width, int height)
{
this->width = width;
this->height = height;
this->aspectRatio = width / (float)height;
glViewport(0, 0, width, height);
proj = perspective(radians(fov), aspectRatio, nearPlane, farPlane);
}
void UpdateCamera(float deltaTime)
{
vec2 input = vec2(IsKeyDown('w') ? 1 : IsKeyDown('s') ? -1 : 0, IsKeyDown('d') ? 1 : IsKeyDown('a') ? -1 : 0);
if (IsMouseDown(1))
HandleRotate(deltaTime);
HandleMove(input, deltaTime);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(fov, aspectRatio, nearPlane, farPlane);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
forward.x = cos(radians(pitch)) * sin(radians(yaw));
forward.y = sin(radians(pitch));
forward.z = cos(radians(pitch)) * cos(radians(yaw));
forward = normalize(forward);
right = normalize(cross(forward, vec3(0, 1, 0)));
up = normalize(cross(right, forward));
mat4 viewMatrix = lookAt(position, position + forward, up);
if (view != viewMatrix)
{
cudaDirty = true;
view = viewMatrix;
}
glMultMatrixf(value_ptr(view));
toggleMouseMovement = IsMouseDown(1);
}
bool toggleMouseMovement;
float width, height;
float moveSpeed, mouseSpeed;
float nearPlane, farPlane;
float fov;
float aspectRatio;
float pitch, yaw;
// fov
float aperture, focalDistance;
vec3 position;
vec3 forward, up, right;
mat4 view;
mat4 proj;
private:
void HandleRotate(float deltaTime)
{
if (toggleMouseMovement == false)
{
WarpMouse(width / 2, height / 2);
return;
}
int xPos, yPos;
GetMousePos(xPos, yPos);
pitch += mouseSpeed * float(height / 2 - yPos) * deltaTime;
yaw += mouseSpeed * float(width / 2 - xPos) * deltaTime;
pitch = clamp(pitch, -89.0f, 89.0f);
yaw = mod(yaw, 360.0f);
WarpMouse(width / 2, height / 2);
}
void HandleMove(vec2 input, float deltaTime)
{
position += (forward * input.x + right * input.y) * deltaTime * moveSpeed;
}
};
struct Material
{
__host__ __device__ Material(MaterialType type = DIFF, vec3 color = vec3(0), vec3 emission = vec3(0))
{
this->type = type;
this->color = color;
this->emission = emission;
}
MaterialType type;
vec3 color;
vec3 emission;
};
struct ObjectIntersection
{
__host__ __device__ ObjectIntersection(bool hit = false, float t = 0, vec3 normal = vec3(0), int materialID = -1)
{
this->hit = hit;
this->t = t;
this->normal = normal;
this->materialID = materialID;
}
bool hit;
float t;
vec3 normal;
int materialID;
};
struct Triangle
{
__host__ __device__ Triangle(vec3 p0 = vec3(0), vec3 p1 = vec3(0), vec3 p2 = vec3(0), vec3 n0 = vec3(0), vec3 n1 = vec3(0), vec3 n2 = vec3(0), int materialID = 0)
{
pos[0] = p0; pos[1] = p1; pos[2] = p2;
nor[0] = normalize(n0); nor[1] = normalize(n1); nor[2] = normalize(n2);
this->materialID = materialID;
}
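// Möller-Trumbore ray/triangle test; returns the barycentric-interpolated vertex normal when
// ENABLE_SMOOTH_NORMAL is set, otherwise the geometric face normal.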
__device__ ObjectIntersection Intersect(const Ray &ray) const
{
bool hit = false;
float u, v, t = 0;
vec3 normal = vec3(0);
vec3 v0v1 = pos[1] - pos[0];
vec3 v0v2 = pos[2] - pos[0];
vec3 pvec = cross(ray.direction, v0v2);
float det = dot(v0v1, pvec);
if (fabs(det) < EPSILON) return ObjectIntersection(hit, t, normal, materialID);
float invDet = 1.0f / det;
vec3 tvec = ray.origin - pos[0];
u = dot(tvec, pvec) * invDet;
if (u < 0 || u > 1) return ObjectIntersection(hit, t, normal, materialID);
vec3 qvec = cross(tvec, v0v1);
v = dot(ray.direction, qvec) * invDet;
if (v < 0 || u + v > 1) return ObjectIntersection(hit, t, normal, materialID);
t = dot(v0v2, qvec) * invDet;
if (t < EPSILON) return ObjectIntersection(hit, t, normal, materialID);
if (ENABLE_SMOOTH_NORMAL)
normal = normalize((1 - u - v) * nor[0] + u * nor[1] + v * nor[2]);
else
normal = normalize(cross(v0v1, v0v2));
hit = true;
return ObjectIntersection(hit, t, normal, materialID);
}
vec3 pos[3];
vec3 nor[3];
int materialID;
};
struct Sphere
{
__host__ __device__ Sphere(vec3 position = vec3(0), float radius = 0, Material material = Material())
{
this->position = position;
this->radius = radius;
this->materialID = materials.size();
materials.push_back(material);
}
float radius;
vec3 position;
int materialID;
__device__ ObjectIntersection Intersect(const Ray &ray)
{
bool hit = false;
float distance = 0, t = 0;
vec3 normal = vec3(0, 0, 0);
vec3 op = position - ray.origin;
float b = dot(op, ray.direction);
float det = b * b - dot(op, op) + radius * radius;
if (det < EPSILON)
return ObjectIntersection(hit, t, normal, materialID);
else
det = glm::sqrt(det);
distance = (t = b - det) > EPSILON ? t : ((t = b + det) > EPSILON ? t : 0);
if (distance > EPSILON)
{
hit = true;
normal = normalize(ray.direction * distance - op);
}
return ObjectIntersection(hit, distance, normal, materialID);
}
__device__ vec3 RandomPoint(curandState* randState)
{
float theta = curand_uniform(randState) * pi<float>();
float phi = curand_uniform(randState) * two_pi<float>();
// Convert to Cartesian and scale by radius
float dxr = radius * sin(theta) * cos(phi);
float dyr = radius * sin(theta) * sin(phi);
float dzr = radius * cos(theta);
return vec3(position.x + dxr, position.y + dyr, position.z + dzr);
}
};
#pragma region KDTree
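// GPU KD-tree construction: per-triangle AABBs are computed on the device, large nodes are split
// at the spatial median of their longest axis (MidSplitNode) until they shrink below a threshold,
// and the remaining small nodes are refined with a sampled surface-area-heuristic split
// (SAHSplitNode). Rays are traversed iteratively with a small device-side stack.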
struct AABB
{
__device__ __host__ AABB()
{
bounds[0] = vec3(0);
bounds[1] = vec3(1);
}
__device__ __host__ AABB(vec3 min, vec3 max)
{
bounds[0] = min;
bounds[1] = max;
}
__device__ __host__ AABB(Triangle* triangles, int count)
{
for (int i = 0; i < count; i++)
{
Expand(triangles[i]);
}
}
__device__ __host__ void Expand(Triangle triangle)
{
Expand
(
vec3
(
min(min(triangle.pos[0].x, triangle.pos[1].x), triangle.pos[2].x),
min(min(triangle.pos[0].y, triangle.pos[1].y), triangle.pos[2].y),
min(min(triangle.pos[0].z, triangle.pos[1].z), triangle.pos[2].z)
),
vec3
(
max(max(triangle.pos[0].x, triangle.pos[1].x), triangle.pos[2].x),
max(max(triangle.pos[0].y, triangle.pos[1].y), triangle.pos[2].y),
max(max(triangle.pos[0].z, triangle.pos[1].z), triangle.pos[2].z)
)
);
}
__device__ __host__ void Expand(vec3 min, vec3 max)
{
if (min.x < bounds[0].x) bounds[0].x = min.x;
if (min.y < bounds[0].y) bounds[0].y = min.y;
if (min.z < bounds[0].z) bounds[0].z = min.z;
if (max.x > bounds[1].x) bounds[1].x = max.x;
if (max.y > bounds[1].y) bounds[1].y = max.y;
if (max.z > bounds[1].z) bounds[1].z = max.z;
}
vec3 bounds[2];
};
struct KDTreeNode
{
__device__ __host__ KDTreeNode(int l = -1, int r = -1, int sa = -1, int ti = 0, int tn = 0, float sp = 0, int d = 0)
{
leftChild = l; rightChild = r; splitAxis = sa; triangleIndex = ti; triangleNumber = tn; splitPos = sp; depth = d;
}
__device__ __host__ KDTreeNode(const KDTreeNode& g)
{
leftChild = g.leftChild; rightChild = g.rightChild; splitAxis = g.splitAxis; triangleIndex = g.triangleIndex;
triangleNumber = g.triangleNumber; splitPos = g.splitPos; nodeAABB = g.nodeAABB; depth = g.depth;
}
int leftChild;
int rightChild;
int splitAxis;
int triangleIndex;
int triangleNumber;
float splitPos;
int depth;
AABB nodeAABB;
};
__device__ void AABBMax(vec3* x, vec3* y, vec3* z, vec3* dist)
{
float xmax = x->x > y->x ? x->x : y->x;
xmax = xmax > z->x ? xmax : z->x;
float ymax = x->y > y->y ? x->y : y->y;
ymax = ymax > z->y ? ymax : z->y;
float zmax = x->z > y->z ? x->z : y->z;
zmax = zmax > z->z ? zmax : z->z;
dist->x = xmax;
dist->y = ymax;
dist->z = zmax;
}
__device__ void AABBMin(vec3* x, vec3* y, vec3* z, vec3* dist)
{
float xmax = x->x < y->x ? x->x : y->x;
xmax = xmax < z->x ? xmax : z->x;
float ymax = x->y < y->y ? x->y : y->y;
ymax = ymax < z->y ? ymax : z->y;
float zmax = x->z < y->z ? x->z : y->z;
zmax = zmax < z->z ? zmax : z->z;
dist->x = xmax;
dist->y = ymax;
dist->z = zmax;
}
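// One thread per triangle: compute the triangle's bounding box from its three vertices.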
__global__ void CreateAABB(int n, Triangle* tri, AABB* aabb)
{
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
return;
AABBMax(&(tri[tid].pos[0]), &(tri[tid].pos[1]), &(tri[tid].pos[2]), &(aabb[tid].bounds[1]));
AABBMin(&(tri[tid].pos[0]), &(tri[tid].pos[1]), &(tri[tid].pos[2]), &(aabb[tid].bounds[0]));
}
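// Single-thread kernel: clears the device lists and pushes the root node covering all triangles.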
__global__ void InitRoot(int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* activeList, unsigned int* activeListPtr, unsigned int* nextListPtr, unsigned int* smallListPtr, unsigned int* tnaPtr, AABB aabb)
{
DeviceVector<int>::clear(activeListPtr);
DeviceVector<int>::clear(nextListPtr);
DeviceVector<int>::clear(smallListPtr);
DeviceVector<int>::clear(tnaPtr);
DeviceVector<KDTreeNode>::clear(nodesPtr);
KDTreeNode n;
n.triangleIndex = 0;
n.triangleNumber = nTri;
n.nodeAABB = aabb;
n.depth = 0;
DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, n);
*(tnaPtr) = nTri;
int i = 0;
DeviceVector<int>::push_back(activeList, activeListPtr, i);
}
__global__ void CopyTriangle(int* tna, int n)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= n)
return;
tna[tid] = tid;
}
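// Median split of every node in activeList along its longest axis: creates both children,
// re-buckets the node's triangles into tna (a triangle whose AABB straddles the plane is pushed
// to both sides), and queues large children on nextList and medium-sized ones on smallList.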
__global__ void MidSplitNode(Triangle* tri, AABB* aabb, int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* activeList, unsigned int* activeListPtr, int* nextList, unsigned int* nextListPtr, int* smallList, unsigned int* smallListPtr, int* tna, unsigned int* tnaPtr, int* tnahelper, unsigned int* tnahelperPtr, unsigned int tnaStartPtr)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= *activeListPtr)
return;
//printf("tid=%d\n",tid);
int id = activeList[tid];
//printf("node triangle number=%d\n",nodes[id].triangleNumber);
int leftid;
int rightid;
float sp;
if (nodes[id].depth > KDTREE_MAX_DEPTH)
return;
//KDTreeNode currentNode(nodes[id]);
vec3 volume = nodes[id].nodeAABB.bounds[1] - nodes[id].nodeAABB.bounds[0];
if (volume.x >= volume.y && volume.x >= volume.z)// split x
{
nodes[id].splitAxis = 0;
sp = nodes[id].nodeAABB.bounds[0].x + volume.x / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].x = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].x = nodes[id].nodeAABB.bounds[1].x;
atarashiiNode.nodeAABB.bounds[0].x = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else if (volume.y >= volume.x && volume.y >= volume.z)// split y
{
nodes[id].splitAxis = 1;
sp = nodes[id].nodeAABB.bounds[0].y + volume.y / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].y = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].y = nodes[id].nodeAABB.bounds[1].y;
atarashiiNode.nodeAABB.bounds[0].y = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else // split z
{
nodes[id].splitAxis = 2;
sp = nodes[id].nodeAABB.bounds[0].z + volume.z / 2.0f;
nodes[id].splitPos = sp;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].z = sp;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].z = nodes[id].nodeAABB.bounds[1].z;
atarashiiNode.nodeAABB.bounds[0].z = sp;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
// split triangles
int leftcount = 0;
int rightcount = 0;
unsigned int tnapos;
int endPtr = nodes[id].triangleIndex + nodes[id].triangleNumber - 1;
/*printf("triangleIndex=%d\n", currentNode.triangleIndex);
printf("triangleNumber=%d\n", currentNode.triangleNumber);
printf("endPtr=%d\n", endPtr);*/
for (int i = nodes[id].triangleIndex; i <= endPtr; i++)
{
int triid = tna[i];
switch (nodes[id].splitAxis)
{
case 0:
if (aabb[triid].bounds[0].x <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].x >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 1:
if (aabb[triid].bounds[0].y <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].y >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 2:
if (aabb[triid].bounds[0].z <= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].z >= sp) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
}
}
//printf("leftcount=%d\nrightcount=%d\n", leftcount, rightcount);
nodes[leftid].triangleNumber = leftcount;
nodes[rightid].triangleNumber = rightcount;
nodes[leftid].depth = nodes[id].depth + 1;
nodes[rightid].depth = nodes[id].depth + 1;
//printf("node %d was splited with left = %d and right = %d with sp=%.5f tna=%d\n", id, leftcount, rightcount, sp, *tnaPtr);
// add to nextList
if (leftcount > KDTREE_THRESHOLD * 2)
DeviceVector<int>::push_back(nextList, nextListPtr, leftid);
else if (leftcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, leftid);
if (rightcount > KDTREE_THRESHOLD * 2)
DeviceVector<int>::push_back(nextList, nextListPtr, rightid);
else if (rightcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, rightid);
}
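// Like MidSplitNode, but the split position is chosen by evaluating a simple
// surface-area-heuristic cost at nine evenly spaced candidate planes along the longest axis.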
__global__ void SAHSplitNode(Triangle* tri, AABB* aabb, int nTri, KDTreeNode* nodes, unsigned int* nodesPtr, int* smallList, unsigned int* smallListPtr, int* nextList, unsigned int* nextListPtr, int* tna, unsigned int* tnaPtr, int* tnahelper, unsigned int* tnahelperPtr, unsigned int tnaStartPtr)
{
unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid >= *smallListPtr)
return;
//printf("tid=%d\n",tid);
int id = smallList[tid];
//printf("node triangle number=%d\n",nodes[id].triangleNumber);
int leftid;
int rightid;
float tpos;
//KDTreeNode currentNode(nodes[id]);
if (nodes[id].depth > KDTREE_MAX_DEPTH)
return;
vec3 volume = nodes[id].nodeAABB.bounds[1] - nodes[id].nodeAABB.bounds[0];
if (volume.x >= volume.y && volume.x >= volume.z)// split x
{
nodes[id].splitAxis = 0;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].x + volume.x*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
if ((aabb[tna[i]].bounds[0].x + aabb[tna[i]].bounds[1].x) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
tpos = minpos; // split at the lowest-cost candidate found above
nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].x = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].x = nodes[id].nodeAABB.bounds[1].x;
atarashiiNode.nodeAABB.bounds[0].x = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else if (volume.y >= volume.x && volume.y >= volume.z)// split y
{
nodes[id].splitAxis = 1;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].y + volume.y*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
if ((aabb[tna[i]].bounds[0].y + aabb[tna[i]].bounds[1].y) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
tpos = minpos; // split at the lowest-cost candidate found above
nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].y = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].y = nodes[id].nodeAABB.bounds[1].y;
atarashiiNode.nodeAABB.bounds[0].y = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
else // split z
{
nodes[id].splitAxis = 2;
// looking for best candidate
float minsah = 999999.0f;
float minpos;
for (float p = 0.1f; p < 1.0f; p += 0.1f) {
tpos = nodes[id].nodeAABB.bounds[0].z + volume.z*p;
int ct1, ct2;
ct1 = ct2 = 0;
for (int i = nodes[id].triangleIndex, j = 0; j < nodes[id].triangleNumber; i++, j++) {
if ((aabb[tna[i]].bounds[0].z + aabb[tna[i]].bounds[1].z) / 2 < tpos)
ct1++;
else
ct2++;
}
float sah = ct1 * p + ct2 * (1 - p);
if (sah < minsah) {
minsah = sah;
minpos = tpos;
}
}
tpos = minpos; // split at the lowest-cost candidate found above
nodes[id].splitPos = tpos;
KDTreeNode atarashiiNode;
atarashiiNode.nodeAABB = nodes[id].nodeAABB;
atarashiiNode.nodeAABB.bounds[1].z = tpos;
leftid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].leftChild = leftid;
atarashiiNode.nodeAABB.bounds[1].z = nodes[id].nodeAABB.bounds[1].z;
atarashiiNode.nodeAABB.bounds[0].z = tpos;
rightid = DeviceVector<KDTreeNode>::push_back(nodes, nodesPtr, atarashiiNode);
nodes[id].rightChild = rightid;
}
//printf("sp=%.3f\n",sp);
// split triangles
int leftcount = 0;
int rightcount = 0;
unsigned int tnapos;
int endPtr = nodes[id].triangleIndex + nodes[id].triangleNumber - 1;
/*printf("triangleIndex=%d\n", currentNode.triangleIndex);
printf("triangleNumber=%d\n", currentNode.triangleNumber);
printf("endPtr=%d\n", endPtr);*/
for (int i = nodes[id].triangleIndex; i <= endPtr; i++)
{
int triid = tna[i];
switch (nodes[id].splitAxis)
{
case 0:
if (aabb[triid].bounds[0].x <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
//DeviceVector<int>::push_back(tnahelper, tnahelperPtr, leftid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].x >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 1:
if (aabb[triid].bounds[0].y <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].y >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
case 2:
if (aabb[triid].bounds[0].z <= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = leftid;
leftcount++;
}
if (aabb[triid].bounds[1].z >= tpos) {
tnapos = DeviceVector<int>::push_back(tna, tnaPtr, triid);
tnahelper[tnapos - tnaStartPtr] = rightid;
rightcount++;
}
break;
}
}
//printf("leftcount=%d\nrightcount=%d\n", leftcount, rightcount);
nodes[leftid].triangleNumber = leftcount;
nodes[rightid].triangleNumber = rightcount;
//printf("node %d was splited with left = %d and right = %d with tna=%d\n", id, leftcount, rightcount, *tnaPtr);
// add to nextList
nodes[leftid].depth = nodes[id].depth+1;
nodes[rightid].depth = nodes[id].depth+1;
if (leftcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, leftid);
if (rightcount > KDTREE_THRESHOLD)
DeviceVector<int>::push_back(smallList, smallListPtr, rightid);
}
__global__ void CalculateTriangleIndex(int start, int end, int base, KDTreeNode* nodes)
{
int count = 0;
int basecount = nodes[base].triangleIndex + nodes[base].triangleNumber;
for (int i = start; i <= end; i++)
{
nodes[i].triangleIndex = basecount + count;
count += nodes[i].triangleNumber;
}
}
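// Slab test of the ray against a node AABB translated by the mesh position: mirrors the ray so
// all direction components are positive, then intersects the three slabs; on a hit closer than
// the current best it reports the entry distance through 'distance'.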
__device__ bool KDRayTraversal(KDTreeNode* root, Ray ray, float& minDist, float& distance, vec3 position)
{
if (root->triangleNumber <= 0)
return false;
vec3 minBox = root->nodeAABB.bounds[0] + position;
vec3 maxBox = root->nodeAABB.bounds[1] + position;
if (ray.direction.x < 0)
{
ray.origin.x = minBox.x + maxBox.x - ray.origin.x;
ray.direction.x = -ray.direction.x;
}
if (ray.direction.y < 0)
{
ray.origin.y = minBox.y + maxBox.y - ray.origin.y;
ray.direction.y = -ray.direction.y;
}
if (ray.direction.z < 0)
{
ray.origin.z = minBox.z + maxBox.z - ray.origin.z;
ray.direction.z = -ray.direction.z;
}
vec3 div = 1.0f / ray.direction;
vec3 tMin = (minBox - ray.origin) * div;
vec3 tMax = (maxBox - ray.origin) * div;
float tmin = max(max(tMin.x, tMin.y), tMin.z);
float tmax = min(min(tMax.x, tMax.y), tMax.z);
if (tmin <= tmax)
{
if (tmin < minDist)
{
distance = tmin;
return true;
}
else
return false;
}
else
return false;
}
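// Iterative KD-tree traversal with an explicit stack: interior nodes push both children (ordered
// by which side of the split plane the entry point lies on), leaf nodes test their triangles and
// keep the closest hit.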
__device__ ObjectIntersection RayKDTreeTraversal(KDTreeNode* nodes, int* tna, Ray ray, Triangle* triangles, vec3 position)
{
int currentid = 0, leftid = 0, rightid = 0, cid = 0;
bool isHit = false;
float minDist = INF;
vec3 normal = vec3(0);
int materialID;
DeviceStack<int> treestack;
treestack.push(0);
float distance = -1.0f;
vec3 point;
while (!treestack.empty())
{
currentid = treestack.pop();
//test node intersection
if (KDRayTraversal(&nodes[currentid], ray, minDist, distance, position))
{
leftid = nodes[currentid].leftChild;
rightid = nodes[currentid].rightChild;
//// leaf node
if (leftid == -1)
{
for (int i = nodes[currentid].triangleIndex; i < nodes[currentid].triangleIndex + nodes[currentid].triangleNumber; i++)
{
ObjectIntersection intersection = triangles[tna[i]].Intersect(ray);
if (intersection.hit && intersection.t < minDist)
{
minDist = intersection.t;
isHit = true;
normal = intersection.normal;
materialID = intersection.materialID;
}
}
continue;
}
// middle node
if (leftid != -1)
{
point = ray.origin + ray.direction * distance;
if (nodes[currentid].splitAxis == 0)
{
if (point.x < nodes[currentid].nodeAABB.bounds[0].x + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
else if (nodes[currentid].splitAxis == 1)
{
if (point.y < nodes[currentid].nodeAABB.bounds[0].y + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
else if (nodes[currentid].splitAxis == 2)
{
if (point.z < nodes[currentid].nodeAABB.bounds[0].z + nodes[currentid].splitPos)
{
treestack.push(leftid);
treestack.push(rightid);
}
else
{
treestack.push(rightid);
treestack.push(leftid);
}
}
}
}
}
return ObjectIntersection(isHit, minDist, normal, materialID);
}
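// Functors used with thrust::transform_reduce to find the extreme AABB coordinates of the scene.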
struct MaxX
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].x;
}
};
struct MaxY
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].y;
}
};
struct MaxZ
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[1].z;
}
};
struct MinX
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].x;
}
};
struct MinY
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].y;
}
};
struct MinZ
{
__host__ __device__ float operator()(AABB const& x)const {
return x.bounds[0].z;
}
};
struct KDTree
{
KDTree(){}
KDTree(Triangle* tri, int n)
{
h_Triangles = tri;
nTriangle = n;
rootAABB = AABB(h_Triangles, nTriangle);
printf("Root AABB Size : Min : %f %f %f | Max : %f %f %f\n", rootAABB.bounds[0].x, rootAABB.bounds[0].y, rootAABB.bounds[0].z, rootAABB.bounds[1].x, rootAABB.bounds[1].y, rootAABB.bounds[1].z);
}
~KDTree() { freeMemory(); }
void Build()
{
int blocksize = (nTriangle + 255) / 256;
allocateMemory();
cout << "memcpy on gpu" << endl;
// calculate AABB
CreateAABB << <blocksize, 256 >> > (nTriangle, d_Triangles, d_AABB);
MidSplit();
SAHSplit();
cout << "gpu kdtree debug info:" << endl;
cout << nodes.size() << endl;
cout << triangleNodeAssociation.size() << endl;
}
AABB rootAABB;
int nTriangle;
Triangle* d_Triangles;
Triangle* h_Triangles;
AABB* d_AABB;
DeviceVector<KDTreeNode> nodes;
DeviceVector<int> triangleNodeAssociation;
DeviceVector<int> triangleNodeAssociationHelper;
DeviceVector<int> activeList;
DeviceVector<int> nextList;
DeviceVector<int> smallList;
private:
void allocateMemory()
{
gpuErrorCheck(cudaMalloc((void**)&d_Triangles, sizeof(Triangle)*nTriangle));
gpuErrorCheck(cudaMalloc((void**)&d_AABB, sizeof(AABB)*nTriangle));
gpuErrorCheck(cudaMemcpy(d_Triangles, h_Triangles, sizeof(Triangle)*nTriangle, cudaMemcpyHostToDevice));
nodes.allocateMemory(nTriangle / 3);
triangleNodeAssociation.allocateMemory(nTriangle * 30);
triangleNodeAssociationHelper.allocateMemory(nTriangle * 10);
activeList.allocateMemory(nTriangle / 3);
nextList.allocateMemory(nTriangle / 3);
smallList.allocateMemory(nTriangle / 3);
}
void freeMemory()
{
printf("KD Tree Free\n");
gpuErrorCheck(cudaFree(d_Triangles));
gpuErrorCheck(cudaFree(d_AABB));
}
AABB CalculateRootAABB()
{
thrust::device_ptr<AABB> thrustPtr(d_AABB);
float maxx = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxX(), 0, thrust::maximum<float>());
float maxy = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxY(), 0, thrust::maximum<float>());
float maxz = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MaxZ(), 0, thrust::maximum<float>());
float minx = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinX(), 0, thrust::minimum<float>());
float miny = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinY(), 0, thrust::minimum<float>());
float minz = thrust::transform_reduce(thrustPtr, thrustPtr + nTriangle, MinZ(), 0, thrust::minimum<float>());
gpuErrorCheck(cudaDeviceSynchronize());
AABB tmp;
tmp.bounds[0] = vec3(minx, miny, minz);
tmp.bounds[1] = vec3(maxx, maxy, maxz);
return tmp;
}
void MidSplit()
{
InitRoot << <1, 1 >> > (nTriangle, nodes.data, nodes.d_ptr, activeList.data, activeList.d_ptr, nextList.d_ptr, smallList.d_ptr, triangleNodeAssociation.d_ptr, rootAABB);
gpuErrorCheck(cudaDeviceSynchronize());
CopyTriangle << <(nTriangle + 255) / 256, 256 >> > (triangleNodeAssociation.data, nTriangle);
gpuErrorCheck(cudaDeviceSynchronize());
while (!activeList.h_empty())
{
int base = nodes.size() - 1;
int startnode = nodes.size();
int start = triangleNodeAssociation.size();
triangleNodeAssociationHelper.h_clear();
MidSplitNode << <(activeList.size() + 255) / 256, 256 >> > (d_Triangles, d_AABB, nTriangle,
nodes.data,
nodes.d_ptr,
activeList.data,
activeList.d_ptr,
nextList.data,
nextList.d_ptr,
smallList.data,
smallList.d_ptr,
triangleNodeAssociation.data,
triangleNodeAssociation.d_ptr,
triangleNodeAssociationHelper.data,
triangleNodeAssociationHelper.d_ptr,
start);
gpuErrorCheck(cudaDeviceSynchronize());
int end = triangleNodeAssociation.size();
int endnode = nodes.size() - 1;
int noftna = end - start;
thrust::sort_by_key(triangleNodeAssociationHelper.thrustPtr, triangleNodeAssociationHelper.thrustPtr + noftna, triangleNodeAssociation.thrustPtr + start);
gpuErrorCheck(cudaDeviceSynchronize());
// calculate triangleIndex
CalculateTriangleIndex << <1, 1 >> > (startnode, endnode, base, nodes.data);
gpuErrorCheck(cudaDeviceSynchronize());
// swap activeList and nextList
//cout<<"nextlist size:"<<nextList.size()<<" tnasize="<<noftna<<endl;
gpuErrorCheck(cudaMemcpy(activeList.data, nextList.data, sizeof(int)*nextList.size(), cudaMemcpyDeviceToDevice));
gpuErrorCheck(cudaMemcpy(activeList.d_ptr, nextList.d_ptr, sizeof(unsigned int), cudaMemcpyDeviceToDevice));
nextList.h_clear();
triangleNodeAssociationHelper.h_clear();
gpuErrorCheck(cudaDeviceSynchronize());
}
}
void SAHSplit()
{
{
while (!smallList.h_empty())
{
int base = nodes.size() - 1;
int startnode = nodes.size();
int start = triangleNodeAssociation.size();
triangleNodeAssociationHelper.h_clear();
SAHSplitNode << <(smallList.size() + 255) / 256, 256 >> > (d_Triangles, d_AABB, nTriangle,
nodes.data,
nodes.d_ptr,
smallList.data,
smallList.d_ptr,
nextList.data,
nextList.d_ptr,
triangleNodeAssociation.data,
triangleNodeAssociation.d_ptr,
triangleNodeAssociationHelper.data,
triangleNodeAssociationHelper.d_ptr,
start);
gpuErrorCheck(cudaDeviceSynchronize());
int end = triangleNodeAssociation.size();
int endnode = nodes.size() - 1;
int noftna = end - start;
thrust::sort_by_key(triangleNodeAssociationHelper.thrustPtr, triangleNodeAssociationHelper.thrustPtr + noftna, triangleNodeAssociation.thrustPtr + start);
gpuErrorCheck(cudaDeviceSynchronize());
// calculate triangleIndex
CalculateTriangleIndex << <1, 1 >> > (startnode, endnode, base, nodes.data);
gpuErrorCheck(cudaDeviceSynchronize());
// swap smallList and nextList
//cout<<"nextlist size:"<<nextList.size()<<" tnasize="<<noftna<<endl;
gpuErrorCheck(cudaMemcpy(smallList.data, nextList.data, sizeof(int)*nextList.size(), cudaMemcpyDeviceToDevice));
gpuErrorCheck(cudaMemcpy(smallList.d_ptr, nextList.d_ptr, sizeof(unsigned int), cudaMemcpyDeviceToDevice));
nextList.h_clear();
triangleNodeAssociationHelper.h_clear();
gpuErrorCheck(cudaDeviceSynchronize());
}
}
}
};
#pragma endregion KDTree
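// Triangle mesh loaded with tinyobjloader: .mtl materials are appended to the global material
// list and every face becomes a Triangle. Intersect() walks the per-mesh KD-tree when
// ENABLE_KDTREE is set, otherwise it brute-forces every triangle.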
struct Mesh
{
__host__ __device__ Mesh() {}
__host__ Mesh(vec3 position, const char* fileName = "", Material material = Material())
{
this->position = position;
std::string mtlBasePath;
std::string inputFile = fileName;
unsigned long pos = inputFile.find_last_of("/");
mtlBasePath = inputFile.substr(0, pos + 1);
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> obj_shapes;
std::vector<tinyobj::material_t> obj_materials;
std::vector<int> materialIDs;
printf("Loading %s...\n", fileName);
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &obj_shapes, &obj_materials, &err, inputFile.c_str(), mtlBasePath.c_str());
if (!err.empty())
std::cerr << err << std::endl;
if (!ret) exit(1);
for (auto & obj_material : obj_materials)
{
std::string texturePath = "";
vec3 diffuseColor = vec3(obj_material.diffuse[0], obj_material.diffuse[1], obj_material.diffuse[2]);
vec3 emissionColor = vec3(obj_material.emission[0], obj_material.emission[1], obj_material.emission[2]);
materialIDs.push_back(materials.size());
materials.push_back(Material(material.type, diffuseColor, emissionColor));
}
long shapeSize, faceSize;
shapeSize = obj_shapes.size();
std::vector<Triangle>* triangles = new std::vector<Triangle>;
for (int i = 0; i < shapeSize; i++)
{
size_t index_offset = 0;
faceSize = obj_shapes[i].mesh.num_face_vertices.size();
for (size_t f = 0; f < faceSize; f++)
{
size_t fnum = obj_shapes[i].mesh.num_face_vertices[f];
vec3 pos[3];
vec3 nor[3];
for (int k = 0; k < 3; k++)
{
tinyobj::index_t idx = obj_shapes[i].mesh.indices[index_offset + k];
pos[k] = vec3(
attrib.vertices[3 * idx.vertex_index + 0],
attrib.vertices[3 * idx.vertex_index + 1],
attrib.vertices[3 * idx.vertex_index + 2]
);
nor[k] = vec3(
attrib.normals[3 * idx.normal_index + 0],
attrib.normals[3 * idx.normal_index + 1],
attrib.normals[3 * idx.normal_index + 2]
);
nor[k] = normalize(nor[k]);
}
Triangle triangle;
if (obj_shapes[i].mesh.material_ids[f] < materialIDs.size())
{
triangle = Triangle(pos[0], pos[1], pos[2], nor[0], nor[1], nor[2], materialIDs[obj_shapes[i].mesh.material_ids[f]]);
}
else
{
triangle = Triangle(pos[0], pos[1], pos[2], nor[0], nor[1], nor[2], 0);
}
triangles->push_back(triangle);
index_offset += fnum;
}
}
this->count = triangles->size();
this->triangles = triangles->data();
}
vec3 position;
Triangle* triangles;
int count;
KDTree* tree;
KDTreeNode* nodes;
int* tna;
__device__ ObjectIntersection Intersect(Ray ray)
{
#if ENABLE_KDTREE
ObjectIntersection intersection = RayKDTreeTraversal(nodes, tna, ray, triangles, position);
return intersection;
#else
float tNear = INFINITY;
ObjectIntersection intersection = ObjectIntersection();
for (int i = 0; i < count; i++)
{
ObjectIntersection temp = triangles[i].Intersect(ray);
if (temp.hit && temp.t < tNear)
{
tNear = temp.t;
intersection = temp;
}
}
return intersection;
#endif
}
};
#pragma endregion Structs
#pragma region Kernels
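// Closest-hit search over all spheres and triangles passed to the kernel.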
__device__ ObjectIntersection Intersect(Ray ray, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles)
{
ObjectIntersection intersection = ObjectIntersection();
ObjectIntersection temp = ObjectIntersection();
for (int i = 0; i < spheres.size; i++)
{
temp = spheres.array[i].Intersect(ray);
if (temp.hit)
{
if (intersection.t == 0 || temp.t < intersection.t)
{
intersection = temp;
}
}
}
// Closest triangle hit: single pass over all triangles.
float tNear = INFINITY;
ObjectIntersection triangleIntersection = ObjectIntersection();
for (int i = 0; i < triangles.size; i++)
{
ObjectIntersection temp = triangles.array[i].Intersect(ray);
if (temp.hit && temp.t < tNear)
{
tNear = temp.t;
triangleIntersection = temp;
}
}
if (triangleIntersection.hit)
{
if (intersection.t == 0 || triangleIntersection.t < intersection.t)
{
intersection = triangleIntersection;
}
}
return intersection;
}
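// Samples the outgoing direction for the hit material and attenuates the path throughput (mask):
// cosine-weighted hemisphere for DIFF, a Phong-like lobe around the mirror direction for GLOSS,
// Fresnel-weighted reflection/refraction (Schlick approximation, picked by Russian roulette) for
// TRANS, and a perfect mirror for SPEC.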
__device__ Ray GetReflectedRay(Ray ray, vec3 hitPoint, glm::vec3 normal, vec3 &mask, Material material, curandState* randState)
{
switch (material.type)
{
case DIFF:
{
vec3 nl = dot(normal, ray.direction) < EPSILON ? normal : normal * -1.0f;
float r1 = two_pi<float>() * curand_uniform(randState);
float r2 = curand_uniform(randState);
float r2s = sqrt(r2);
vec3 w = nl;
vec3 u;
if (fabs(w.x) > 0.1f)
u = normalize(cross(vec3(0.0f, 1.0f, 0.0f), w));
else
u = normalize(cross(vec3(1.0f, 0.0f, 0.0f), w));
vec3 v = cross(w, u);
vec3 reflected = normalize((u * __cosf(r1) * r2s + v * __sinf(r1) * r2s + w * sqrt(1 - r2)));
mask *= material.color;
return Ray(hitPoint, reflected);
}
case GLOSS:
{
float phi = 2 * pi<float>() * curand_uniform(randState);
float r2 = curand_uniform(randState);
float phongExponent = 20;
float cosTheta = __powf(1 - r2, 1.0f / (phongExponent + 1));
float sinTheta = sqrtf(1.0f - cosTheta * cosTheta);
vec3 w = normalize(ray.direction - normal * 2.0f * dot(normal, ray.direction));
vec3 u = normalize(cross((fabs(w.x) > .1 ? vec3(0, 1, 0) : vec3(1, 0, 0)), w));
vec3 v = cross(w, u);
vec3 reflected = normalize(u * __cosf(phi) * sinTheta + v * __sinf(phi) * sinTheta + w * cosTheta);
mask *= material.color;
return Ray(hitPoint, reflected);
}
case TRANS:
{
vec3 nl = dot(normal, ray.direction) < EPSILON ? normal : normal * -1.0f;
vec3 reflection = ray.direction - normal * 2.0f * dot(normal, ray.direction);
bool into = dot(normal, nl) > EPSILON;
float nc = 1.0f;
float nt = 1.5f;
float nnt = into ? nc / nt : nt / nc;
float Re, RP, TP, Tr;
vec3 tdir = vec3(0.0f, 0.0f, 0.0f);
float ddn = dot(ray.direction, nl);
float cos2t = 1.0f - nnt * nnt * (1.0f - ddn * ddn);
if (cos2t < EPSILON) return Ray(hitPoint, reflection);
if (into)
tdir = normalize((ray.direction * nnt - normal * (ddn * nnt + sqrt(cos2t))));
else
tdir = normalize((ray.direction * nnt + normal * (ddn * nnt + sqrt(cos2t))));
float a = nt - nc;
float b = nt + nc;
float R0 = a * a / (b * b);
float c;
if (into)
c = 1 + ddn;
else
c = 1 - dot(tdir, normal);
Re = R0 + (1 - R0) * c * c * c * c * c;
Tr = 1 - Re;
float P = .25 + .5 * Re;
RP = Re / P;
TP = Tr / (1 - P);
if (curand_uniform(randState) < P)
{
mask *= (RP);
return Ray(hitPoint, reflection);
}
mask *= (TP);
return Ray(hitPoint, tdir);
}
case SPEC:
{
vec3 reflected = ray.direction - normal * 2.0f * dot(normal, ray.direction);
mask *= material.color;
return Ray(hitPoint, reflected);
}
}
}
// Path Tracing + Photon Map
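// Iterative path tracer: bounces up to MAX_DEPTH times, terminates paths by Russian roulette on
// the throughput, accumulates emission, and samples the HDR environment texture (lat/long
// mapping) when a ray escapes the scene.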
__device__ vec3 TraceRay(Ray ray, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, bool directLighting, float directLightingConstant, curandState* randState)
{
vec3 resultColor = vec3(0);
vec3 mask = vec3(1);
for (int depth = 0; depth < MAX_DEPTH; depth++)
{
ObjectIntersection intersection = Intersect(ray, spheres, triangles);
if (intersection.hit == 0)
{
float longlatX = atan2(ray.direction.x, ray.direction.z);
longlatX = longlatX < EPSILON ? longlatX + two_pi<float>() : longlatX;
float longlatY = acos(-ray.direction.y);
float u = longlatX / two_pi<float>();
float v = longlatY / pi<float>();
int u2 = (int)(u * HDRWidth);
int tvec = (int)(v * HDRHeight);
int HDRtexelidx = u2 + tvec * HDRWidth;
float4 HDRcol = tex1Dfetch(HDRtexture, HDRtexelidx);
vec3 HDRcol2 = vec3(HDRcol.x, HDRcol.y, HDRcol.z);
return resultColor + (mask * HDRcol2);
}
vec3 hitPoint = ray.origin + ray.direction * intersection.t;
Material hitMaterial = materials.array[intersection.materialID];
vec3 emission = hitMaterial.emission;
float maxReflection = max(max(mask.r, mask.g), mask.b);
if (curand_uniform(randState) > maxReflection)
break;
resultColor += mask * emission;
ray = GetReflectedRay(ray, hitPoint, intersection.normal, mask, hitMaterial, randState);
mask *= 1 / maxReflection;
}
return resultColor;
}
// Real time + Photon Mapping Kernel
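// Each launch renders one (loopX, loopY) tile of the image and progressively averages the new
// sample with the accumulated result already stored in the surface.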
__global__ void PathKernel(Camera* camera, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, int loopX, int loopY, bool dof, bool directLighting, float directLightingConstant, int frame, cudaSurfaceObject_t surface)
{
int width = camera->width;
int height = camera->height;
int x = gridDim.x * blockDim.x * loopX + blockIdx.x * blockDim.x + threadIdx.x;
int y = gridDim.y * blockDim.y * loopY + blockIdx.y * blockDim.y + threadIdx.y;
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int i = y * width + x;
if (i >= width * height) return;
curandState randState;
float4 originColor;
surf2Dread(&originColor, surface, x * sizeof(float4), y);
vec3 resultColor = vec3(0, 0, 0);
curand_init(WangHash(threadId) + WangHash(frame), 0, 0, &randState);
Ray ray = camera->GetRay(&randState, x, y, dof);
vec3 color = TraceRay(ray, spheres, triangles, materials, directLighting, directLightingConstant, &randState);
resultColor = (vec3(originColor.x, originColor.y, originColor.z) * (float)(frame - 1) + color) / (float)frame;
surf2Dwrite(make_float4(resultColor.r, resultColor.g, resultColor.b, 1.0f), surface, x * sizeof(float4), y);
}
// Photon Mapping Rendering Loop
void TracingLoop(Camera* camera, KernelArray<Sphere> spheres, KernelArray<Triangle> triangles, KernelArray<Material> materials, int frame, bool dof, bool directLighting, float directLightingConstant, cudaSurfaceObject_t surface)
{
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 5000000000 * sizeof(float));
for (int i = 0; i < TRACE_OUTER_LOOP_X; i++)
{
for (int j = 0; j < TRACE_OUTER_LOOP_Y; j++)
{
PathKernel << <grid, block >> > (camera, spheres, triangles, materials, i, j, dof, directLighting, directLightingConstant, frame, surface);
gpuErrorCheck(cudaDeviceSynchronize());
}
}
}
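// Per-frame host setup: copies the camera and scene to the device, flattens every mesh's
// triangles into world space, times memory allocation and rendering with CUDA events, then runs
// the tiled tracing loop.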
void RenderRealTime(cudaSurfaceObject_t surface, bool dof, bool photon, bool directLighting, int frame)
{
int width = camera->width;
int height = camera->height;
cudaEvent_t start, stop;
gpuErrorCheck(cudaEventCreate(&start));
gpuErrorCheck(cudaEventRecord(start, 0));
Camera* cudaCamera;
gpuErrorCheck(cudaMalloc(&cudaCamera, sizeof(Camera)));
gpuErrorCheck(cudaMemcpy(cudaCamera, camera, sizeof(Camera), cudaMemcpyHostToDevice));
thrust::device_vector<Sphere> cudaSpheres(spheres);
thrust::device_vector<Material> cudaMaterials(materials);
thrust::host_vector<Triangle> triangles;
thrust::device_vector<Triangle> cudaTriangles;
for (auto & mesh : meshes)
{
for (int i = 0; i < mesh.count; i++)
{
for (auto & pos : mesh.triangles[i].pos)
{
pos += mesh.position;
}
triangles.push_back(mesh.triangles[i]);
}
}
cudaTriangles = triangles;
// int meshCount = sizeof(meshes) / sizeof(Mesh);
// Mesh* cudaMeshes;
// std::vector<Mesh> meshVector;
// std::vector<Triangle*> triangleVector;
// for (int i = 0; i < meshCount; i++)
// {
// Mesh currentMesh = meshes[i];
// Mesh cudaMesh = currentMesh;
// Triangle* cudaTriangles;
// gpuErrorCheck(cudaMalloc(&cudaTriangles, sizeof(Triangle) * currentMesh.count));
// gpuErrorCheck(cudaMemcpy(cudaTriangles, currentMesh.triangles, sizeof(Triangle) * currentMesh.count, cudaMemcpyHostToDevice));
//
//#if ENABLE_KDTREE
// cudaMesh.nodes = currentMesh.tree->nodes.data;
// cudaMesh.tna = currentMesh.tree->triangleNodeAssociation.data;
//#endif
// cudaMesh.triangles = cudaTriangles;
// meshVector.push_back(cudaMesh);
// triangleVector.push_back(cudaTriangles);
// }
// gpuErrorCheck(cudaMalloc(&cudaMeshes, sizeof(Mesh) * meshCount));
// gpuErrorCheck(cudaMemcpy(cudaMeshes, meshVector.data(), sizeof(Mesh) * meshCount, cudaMemcpyHostToDevice));
gpuErrorCheck(cudaEventCreate(&stop));
gpuErrorCheck(cudaEventRecord(stop, 0));
gpuErrorCheck(cudaEventSynchronize(stop));
gpuErrorCheck(cudaEventElapsedTime(&memoryAllocTime, start, stop));
gpuErrorCheck(cudaEventDestroy(start));
gpuErrorCheck(cudaEventDestroy(stop));
block = dim3(16, 9);
grid.x = ceil(ceil(width / TRACE_OUTER_LOOP_X) / block.x);
grid.y = ceil(ceil(height / TRACE_OUTER_LOOP_Y) / block.y);
gpuErrorCheck(cudaEventCreate(&start));
gpuErrorCheck(cudaEventRecord(start, 0));
TracingLoop(cudaCamera, ConvertToKernel(cudaSpheres), ConvertToKernel(cudaTriangles), ConvertToKernel(cudaMaterials), frame, dof, directLighting, directLightingConstant, surface);
gpuErrorCheck(cudaDeviceSynchronize());
gpuErrorCheck(cudaEventCreate(&stop));
gpuErrorCheck(cudaEventRecord(stop, 0));
gpuErrorCheck(cudaEventSynchronize(stop));
gpuErrorCheck(cudaEventElapsedTime(&renderingTime, start, stop));
gpuErrorCheck(cudaEventDestroy(start));
gpuErrorCheck(cudaEventDestroy(stop));
gpuErrorCheck(cudaFree(cudaCamera));
}
#pragma endregion Kernels
#pragma region Opengl Callbacks
void Keyboard(unsigned char key, int x, int y)
{
keyState[key] = true;
mousePos[0] = x;
mousePos[1] = y;
if (IsKeyDown('r'))
{
enableDof = !enableDof;
cudaDirty = true;
}
if (IsKeyDown('b'))
{
enablePhoton = !enablePhoton;
cudaDirty = true;
}
if (IsKeyDown('q'))
{
enableSaveImage = true;
frame = 1;
cudaDirty = false;
cudaToggle = true;
}
if (IsKeyDown('f'))
{
cudaToggle = !cudaToggle;
frame = 1;
cudaDirty = false;
}
if (IsKeyDown('n'))
{
enableDrawNormal = !enableDrawNormal;
cudaToggle = false;
}
if (IsKeyDown('k'))
{
enableDrawKDTree = !enableDrawKDTree;
cudaToggle = false;
}
if (IsKeyDown('l'))
{
enableDirectLighting = !enableDirectLighting;
cudaDirty = true;
}
if (IsKeyDown('t'))
{
camera->aperture += 0.1f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('g'))
{
camera->aperture -= 0.1f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('y'))
{
camera->focalDistance += 0.5f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('h'))
{
camera->focalDistance -= 0.5f;
cudaDirty = true;
printf("%f %f\n", camera->aperture, camera->focalDistance);
}
if (IsKeyDown('p'))
{
printf("Camera Position : %f %f %f\n", camera->position.x, camera->position.y, camera->position.z);
printf("Pitch Yaw : %f %f\n", camera->pitch, camera->yaw);
}
if (IsKeyDown('u'))
{
enableGUI = !enableGUI;
}
ImGuiIO& io = ImGui::GetIO();
io.AddInputCharacter(key);
glutPostRedisplay();
}
void KeyboardUp(unsigned char key, int x, int y)
{
keyState[key] = false;
mousePos[0] = x;
mousePos[1] = y;
glutPostRedisplay();
}
void Special(int key, int x, int y)
{
ImGuiIO& io = ImGui::GetIO();
io.AddInputCharacter(key);
glutPostRedisplay();
}
void SpecialUp(int key, int x, int y)
{
glutPostRedisplay();
}
void Mouse(int button, int state, int x, int y)
{
mousePos[0] = x;
mousePos[1] = y;
mouseState[button] = !state;
ImGuiIO& io = ImGui::GetIO();
io.MousePos = ImVec2(float(x), float(y));
if (state == GLUT_DOWN && (button == GLUT_LEFT_BUTTON))
io.MouseDown[0] = true;
else
io.MouseDown[0] = false;
if (state == GLUT_DOWN && (button == GLUT_RIGHT_BUTTON))
io.MouseDown[1] = true;
else
io.MouseDown[1] = false;
glutPostRedisplay();
}
void MouseWheel(int button, int dir, int x, int y)
{
if (dir > 0)
{
camera->fov++;
cudaDirty = true;
}
else
{
camera->fov--;
cudaDirty = true;
}
glutPostRedisplay();
}
void Motion(int x, int y)
{
mousePos[0] = x;
mousePos[1] = y;
ImGuiIO& io = ImGui::GetIO();
io.MousePos = ImVec2(float(x), float(y));
glutPostRedisplay();
}
void Reshape(int w, int h)
{
camera->UpdateScreen(w, h);
}
void Idle()
{
int timeSinceStart = glutGet(GLUT_ELAPSED_TIME);
deltaTime = (timeSinceStart - oldTimeSinceStart) * 0.001f;
oldTimeSinceStart = timeSinceStart;
glutPostRedisplay();
}
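// Display: when CUDA rendering is enabled the GL texture is registered and mapped as a CUDA
// surface, the path tracer writes into it and the texture is drawn as a fullscreen quad;
// otherwise the scene is rasterized with immediate-mode OpenGL (spheres, meshes, optional
// normals and KD-tree boxes), followed by the ImGui overlay.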
void Display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
camera->UpdateCamera(deltaTime);
// OpenGL Draw
if (cudaToggle)
{
int width = camera->width;
int height = camera->height;
glColor3f(1, 1, 1);
glDisable(GL_LIGHTING);
cudaGraphicsGLRegisterImage(&viewResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
cudaGraphicsMapResources(1, &viewResource);
cudaGraphicsSubResourceGetMappedArray(&viewArray, viewResource, 0, 0);
cudaResourceDesc viewCudaArrayResourceDesc;
{
viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewArray;
}
cudaSurfaceObject_t viewCudaSurfaceObject;
gpuErrorCheck(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
{
if (cudaDirty)
{
frame = 0;
cudaDirty = false;
}
RenderRealTime(viewCudaSurfaceObject, enableDof, enablePhoton, enableDirectLighting, ++frame);
}
gpuErrorCheck(cudaDestroySurfaceObject(viewCudaSurfaceObject));
gpuErrorCheck(cudaGraphicsUnmapResources(1, &viewResource));
cudaStreamSynchronize(0);
glLoadIdentity();
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, 0, height, -1000, 1000);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0, 1); glVertex2f(0, 0);
glTexCoord2f(1, 1); glVertex2f(width, 0);
glTexCoord2f(1, 0); glVertex2f(width, height);
glTexCoord2f(0, 0); glVertex2f(0, height);
}
glEnd();
}
if (enableSaveImage && frame >= imageSaveSamples)
{
enableSaveImage = false;
cudaToggle = false;
cudaDirty = false;
isSavingImage = false;
frame = 1;
GLubyte *pixels = new GLubyte[3 * width*height];
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, FI_RGBA_RED_MASK, FI_RGBA_GREEN_MASK, FI_RGBA_BLUE_MASK, false);
SwapRedBlue32(image);
FreeImage_Save(FIF_PNG, image, "Result.png", 0);
FreeImage_Unload(image);
delete[] pixels;
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
glEnable(GL_LIGHTING);
}
else
{
// Draw Opengl
{
for (int n = 0; n < spheres.size(); n++)
{
glPushMatrix();
glTranslatef(spheres[n].position.x, spheres[n].position.y, spheres[n].position.z);
glColor3fv(value_ptr(materials[spheres[n].materialID].color));
int i, j;
int lats = 50;
int longs = 50;
float radius = spheres[n].radius;
for (i = 0; i <= lats; i++)
{
float lat0 = pi<float>() * (-float(0.5) + (float) (i - 1) / lats);
float z0 = radius * sin(lat0);
float zr0 = radius * cos(lat0);
float lat1 = pi<float>() * (-float(0.5) + (float) i / lats);
float z1 = radius * sin(lat1);
float zr1 = radius * cos(lat1);
glBegin(GL_QUAD_STRIP);
for (j = 0; j <= longs; j++)
{
float lng = 2 * pi<float>() * (float) (j - 1) / longs;
float x = cos(lng);
float y = sin(lng);
glNormal3f(x * zr1, y * zr1, z1);
glVertex3f(x * zr1, y * zr1, z1);
glNormal3f(x * zr0, y * zr0, z0);
glVertex3f(x * zr0, y * zr0, z0);
}
glEnd();
}
glPopMatrix();
}
for (int n = 0; n < meshes.size(); n++)
{
glPushMatrix();
glTranslatef(meshes[n].position.x, meshes[n].position.y, meshes[n].position.z);
Triangle* triangles = meshes[n].triangles;
for (int i = 0; i < meshes[n].count; i++)
{
glColor3fv(value_ptr(materials[triangles[i].materialID].color));
vec3 p0 = triangles[i].pos[0];
vec3 p1 = triangles[i].pos[1];
vec3 p2 = triangles[i].pos[2];
vec3 normal = cross((p2 - p0), (p1 - p0));
normal = normalize(normal);
glBegin(GL_TRIANGLE_STRIP);
glNormal3fv(value_ptr(normal));
glVertex3fv(value_ptr(p0));
glVertex3fv(value_ptr(p1));
glVertex3fv(value_ptr(p2));
glEnd();
if (enableDrawNormal)
{
glLineWidth(1.0f);
glColor3f(1.0f, 1.0f, 1.0f);
glBegin(GL_LINES);
glVertex3fv(value_ptr(triangles[i].pos[0]));
glVertex3fv(value_ptr(triangles[i].nor[0] + triangles[i].pos[0]));
glVertex3fv(value_ptr(triangles[i].pos[1]));
glVertex3fv(value_ptr(triangles[i].nor[1] + triangles[i].pos[1]));
glVertex3fv(value_ptr(triangles[i].pos[2]));
glVertex3fv(value_ptr(triangles[i].nor[2] + triangles[i].pos[2]));
glEnd();
}
}
if (enableDrawKDTree)
{
glDisable(GL_LIGHTING);
int nodeSize = meshes[n].tree->nodes.size();
glLineWidth(1.0f);
KDTreeNode* nodes = new KDTreeNode[nodeSize];
meshes[n].tree->nodes.CopyToHost(nodes);
for (int i = 0; i < meshes[n].tree->nodes.size(); i++)
{
if (nodes[i].depth > KDTREE_MAX_DEPTH)
printf("WHAT %d\n", nodes[i].depth);
AABB box = nodes[i].nodeAABB;
vec3 corner[8];
corner[0] = { box.bounds[0].x, box.bounds[0].y, box.bounds[0].z };
corner[1] = { box.bounds[1].x, box.bounds[0].y, box.bounds[0].z };
corner[2] = { box.bounds[1].x, box.bounds[0].y, box.bounds[1].z };
corner[3] = { box.bounds[0].x, box.bounds[0].y, box.bounds[1].z };
corner[4] = { box.bounds[0].x, box.bounds[1].y, box.bounds[0].z };
corner[5] = { box.bounds[1].x, box.bounds[1].y, box.bounds[0].z };
corner[6] = { box.bounds[1].x, box.bounds[1].y, box.bounds[1].z };
corner[7] = { box.bounds[0].x, box.bounds[1].y, box.bounds[1].z };
glColor3f(1.0f, 1 - (i / float(nodeSize)), 0.0f);
glLineWidth(i / float(nodeSize));
glBegin(GL_LINES);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[0].x, corner[0].y, corner[0].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glVertex3f(corner[1].x, corner[1].y, corner[1].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[2].x, corner[2].y, corner[2].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[3].x, corner[3].y, corner[3].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[5].x, corner[5].y, corner[5].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[6].x, corner[6].y, corner[6].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[7].x, corner[7].y, corner[7].z);
glVertex3f(corner[4].x, corner[4].y, corner[4].z);
glEnd();
}
delete[] nodes;
glEnable(GL_LIGHTING);
}
glPopMatrix();
}
}
}
int width = camera->width;
int height = camera->height;
ImGui_ImplGLUT_NewFrame(width, height);
// UI
if (enableGUI)
{
ImGui::SetNextWindowPos(ImVec2(0, 0), ImGuiSetCond_Once);
ImGui::Begin("Cuda Tracer", nullptr, ImVec2(0,0), -1.0f, ImGuiWindowFlags_AlwaysAutoResize);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
if (cudaToggle)
{
ImGui::Text("Current Frame : %d", frame);
if (isSavingImage)
{
ImGui::PushItemFlag(ImGuiItemFlags_Disabled, true);
ImGui::PushStyleVar(ImGuiStyleVar_Alpha, ImGui::GetStyle().Alpha * 0.5f);
}
if (ImGui::Button("Save Image"))
{
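// Read the framebuffer back as tightly packed RGB (bottom-up), wrap the raw
// bits in a FreeImage bitmap, swap the red/blue channels, and save it as PNG.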
GLubyte *pixels = new GLubyte[3 * width*height];
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, FI_RGBA_RED_MASK, FI_RGBA_GREEN_MASK, FI_RGBA_BLUE_MASK, false);
SwapRedBlue32(image);
stringstream ss;
ss << "Result_" << frame << ".png";
FreeImage_Save(FIF_PNG, image, ss.str().c_str(), 0);
FreeImage_Unload(image);
delete[] pixels;
}
ImGui::Text("Memory Allocation Time : %f ms", memoryAllocTime);
ImGui::Text("Rendering time : %f ms", renderingTime);
ImGui::Text("Total Time : %f ms", memoryAllocTime + renderingTime);
if (ImGui::Checkbox("Enable Dof", &enableDof))
cudaDirty = true;
if (enableDof)
{
if (ImGui::SliderFloat("Focal Distance", &(camera->focalDistance), EPSILON, 500))
cudaDirty = true;
if (ImGui::SliderFloat("Aperture", &(camera->aperture), EPSILON, 50))
cudaDirty = true;
}
//if (ImGui::Checkbox("Enable Direct Lighting", &enableDirectLighting))
// cudaDirty = true;
//if (ImGui::Checkbox("Enable Photon Mapping", &enablePhoton))
// cudaDirty = true;
//if (ImGui::SliderFloat("Direct Lighting Weight", &directLightingConstant, EPSILON, 1000.0f))
// cudaDirty = true;
if (isSavingImage)
{
ImGui::PopItemFlag();
ImGui::PopStyleVar();
}
}
else
{
ImGui::InputInt("Image Samples", &imageSaveSamples, 1, 1000);
ImGui::SameLine();
if (ImGui::Button("Save Image"))
{
enableSaveImage = true;
frame = 1;
cudaDirty = false;
cudaToggle = true;
isSavingImage = true;
}
if (ImGui::Checkbox("Draw Normal", &enableDrawNormal))
cudaDirty = true;
if (ImGui::Checkbox("Draw Debug KDTree AABBox", &enableDrawKDTree))
cudaDirty = true;
}
if (!isSavingImage)
{
int sphereCount = (int) spheres.size();
int meshCount = (int) meshes.size();
if (ImGui::CollapsingHeader("Objects"))
{
ImGui::Text("Spheres : %d", sphereCount);
ImGui::Text("Meshes : %d", meshCount);
ImGui::SliderInt("Current Object", &objectIndex, 0, sphereCount + meshCount - 1);
if (objectIndex < sphereCount)
{
if (ImGui::SliderFloat3("Position", value_ptr(spheres[objectIndex].position), -100.0f, 100.0f))
cudaDirty = true;
if (ImGui::SliderFloat("Radius", &(spheres[objectIndex].radius), EPSILON, 100))
cudaDirty = true;
//if (ImGui::ListBox("Material Type", (int*)&(spheres[objectIndex].material.type), MATERIAL_TYPE_ARRAY, IM_ARRAYSIZE(MATERIAL_TYPE_ARRAY)))
// cudaDirty = true;
//if (ImGui::SliderFloat3("Color", value_ptr(spheres[objectIndex].material.color), 0.0f, 1.0f))
// cudaDirty = true;
//if (ImGui::SliderFloat3("Emission", value_ptr(spheres[objectIndex].material.emission), 0.0f, 10.0f))
// cudaDirty = true;
}
else
{
int meshIndex = objectIndex - sphereCount;
ImGui::Text("Triangles : %d", meshes[meshIndex].count);
if (ImGui::SliderFloat3("Position", value_ptr(meshes[meshIndex].position), -100.0f, 100.0f))
cudaDirty = true;
}
}
}
ImGui::End();
ImGui::Render();
}
glutSwapBuffers();
}
#pragma endregion Opengl Callbacks
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE | GLUT_MULTISAMPLE);
glutInitWindowPosition(0, 0);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Cuda Tracer");
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "Failed to initialize GLEW()\n");
return -1;
}
GLfloat ambient[] = { 0.2f, 0.2f, 0.2f, 1.0f };
GLfloat diffuse[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat specular[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat position[] = { 0.0f, 40.0f, 0.0f, 0.0f };
glLightfv(GL_LIGHT0, GL_AMBIENT, ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, specular);
glLightfv(GL_LIGHT0, GL_POSITION, position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glMaterialfv(GL_FRONT, GL_SPECULAR, specular);
glMateriali(GL_FRONT, GL_SHININESS, 15);
glShadeModel(GL_SMOOTH);
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glClearColor(0.6, 0.65, 0.85, 0);
FreeImage_Initialise();
{
// imgui
ImGui::CreateContext();
ImGuiIO& io = ImGui::GetIO(); (void) io;
ImGui_ImplGLUT_Init();
ImGui::StyleColorsDark();
}
{
// Init Scene
camera = new Camera;
materials.push_back(Material());
spheres.push_back(Sphere(vec3(0, 45, 0), 1.0f, Material(DIFF, vec3(1), vec3(2.2f, 2.2f, 2.2f))));
spheres.push_back(Sphere(vec3(-10, 8, -10), 8, Material(SPEC)));
spheres.push_back(Sphere(vec3(-10, 8, 10), 8, Material(TRANS)));
meshes.push_back(Mesh(vec3(0, 0, 0), "Cornell.obj", Material(DIFF)));
}
{
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WIDTH, HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
}
glBindTexture(GL_TEXTURE_2D, 0);
}
{
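// Load the HDR environment map, tone-map it (Reinhard05), expand it into a
// float4 array normalized to [0,1], then upload it to the device and bind it
// to the HDRtexture reference with linear filtering.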
FREE_IMAGE_FORMAT fif = FIF_HDR;
FIBITMAP *src(nullptr);
FIBITMAP *dst(nullptr);
BYTE* bits(nullptr);
float4* cpuHDRmap;
src = FreeImage_Load(fif, HDR_FILE_NAME);
dst = FreeImage_ToneMapping(src, FITMO_REINHARD05);
bits = FreeImage_GetBits(dst);
if (bits == nullptr)
return -1;
cpuHDRmap = new float4[HDRWidth * HDRHeight];
for (int x = 0; x < HDRWidth; x++)
{
for (int y = 0; y < HDRHeight; y++)
{
RGBQUAD rgbQuad;
FreeImage_GetPixelColor(dst, x, y, &rgbQuad);
cpuHDRmap[y*HDRWidth + x].x = rgbQuad.rgbRed / 256.0f;
cpuHDRmap[y*HDRWidth + x].y = rgbQuad.rgbGreen / 256.0f;
cpuHDRmap[y*HDRWidth + x].z = rgbQuad.rgbBlue / 256.0f;
cpuHDRmap[y*HDRWidth + x].w = 1.0f;
}
}
gpuErrorCheck(cudaMalloc(&cudaHDRmap, HDRWidth * HDRHeight * sizeof(float4)));
gpuErrorCheck(cudaMemcpy(cudaHDRmap, cpuHDRmap, HDRWidth * HDRHeight * sizeof(float4), cudaMemcpyHostToDevice));
HDRtexture.filterMode = cudaFilterModeLinear;
cudaChannelFormatDesc channel4desc = cudaCreateChannelDesc<float4>();
cudaBindTexture(NULL, &HDRtexture, cudaHDRmap, &channel4desc, HDRWidth * HDRHeight * sizeof(float4));
printf("Load HDR Map Success\n");
printf("Width : %d\nHeight : %d\n", HDRWidth, HDRHeight);
FreeImage_Unload(src);
FreeImage_Unload(dst);
delete[] cpuHDRmap;
}
#if ENABLE_KDTREE
int meshCount = (int) meshes.size();
for (int i = 0; i < meshCount; i++)
{
meshes[i].tree = new KDTree(meshes[i].triangles, meshes[i].count);
meshes[i].tree->Build();
}
#endif
glutKeyboardFunc(Keyboard);
glutKeyboardUpFunc(KeyboardUp);
glutSpecialFunc(Special);
glutSpecialUpFunc(SpecialUp);
glutReshapeFunc(Reshape);
glutIdleFunc(Idle);
glutMouseWheelFunc(MouseWheel);
glutMouseFunc(Mouse);
glutPassiveMotionFunc(Motion);
glutMotionFunc(Motion);
glutDisplayFunc(Display);
glutMainLoop();
// Cleanup
cudaDeviceReset();
ImGui_ImplGLUT_Shutdown();
ImGui::DestroyContext();
return 0;
} |
bffb02e81f5e9f6c08c4960f53d2256a45f8841b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| bffb02e81f5e9f6c08c4960f53d2256a45f8841b.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
902cc00e33f0560b32da807952955312864e2959.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// moveAtoms.cu
// nestedDSMC
//
// Created by Christopher Watkins on 24/03/2015.
//
//
#include "moveAtoms.cuh"
#include "vectorMath.cuh"
#include "declareDeviceConstants.cuh"
#include "declareDeviceParameters.cuh"
void h_moveParticles(struct cudaGraphicsResource **cudaPBOres,
double3 *d_vel,
double3 *d_acc,
double timeValue,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_moveParticles,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
hipGetDevice ( &device );
int numSMs;
hipDeviceGetAttribute(&numSMs,
hipDeviceAttributeMultiprocessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
// std::cout << "gridsize = " << gridSize << " blocksize = " << blockSize << std::endl;
// Map OpenGL buffer object for writing from CUDA
double3 *d_pos = mapCUDAVBOd3(cudaPBOres);
hipLaunchKernelGGL(( d_moveParticles), dim3(gridSize),dim3(blockSize), 0, 0, d_pos,
d_vel,
d_acc,
timeValue,
numberOfAtoms);
//Unmap buffer object
unmapCUDAVBO(cudaPBOres);
return;
}
__global__ void d_moveParticles(double3 *pos,
double3 *vel,
double3 *acc,
double dt,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
double3 l_pos = pos[atom];
double3 l_vel = vel[atom];
double3 l_acc = acc[atom];
for (int i=0; i<30000; i++) {
velocityVerletUpdate(&l_pos,
&l_vel,
&l_acc,
dt);
}
pos[atom] = l_pos;
vel[atom] = l_vel;
acc[atom] = l_acc;
}
return;
}
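// Velocity Verlet step: half-kick v += a*dt/2, drift x += v*dt, recompute
// the acceleration at the new position, then apply the second half-kick.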
__device__ void velocityVerletUpdate(double3 *pos,
double3 *vel,
double3 *acc,
double dt)
{
vel[0] = updateVel(vel[0],
acc[0],
0.5*dt);
pos[0] = updatePos(pos[0],
vel[0],
dt);
acc[0] = updateAcc(pos[0]);
vel[0] = updateVel(vel[0],
acc[0],
0.5*dt);
return;
}
__device__ void symplecticEulerUpdate(double3 *pos,
double3 *vel,
double3 *acc,
double dt)
{
acc[0] = updateAcc(pos[0]);
vel[0] = updateVel(vel[0],
acc[0],
dt);
pos[0] = updatePos(pos[0],
vel[0],
dt);
}
__device__ double3 updateVel(double3 vel,
double3 acc,
double dt)
{
return vel + acc * dt;
}
__device__ double3 updatePos(double3 pos,
double3 vel,
double dt)
{
return pos + vel * dt;
}
__device__ double3 updateAcc(double3 pos)
{
return -0.5 * d_gs * d_muB * dabsB(pos) / d_mRb;
} | 902cc00e33f0560b32da807952955312864e2959.cu | //
// moveAtoms.cu
// nestedDSMC
//
// Created by Christopher Watkins on 24/03/2015.
//
//
#include "moveAtoms.cuh"
#include "vectorMath.cuh"
#include "declareDeviceConstants.cuh"
#include "declareDeviceParameters.cuh"
void h_moveParticles(struct cudaGraphicsResource **cudaPBOres,
double3 *d_vel,
double3 *d_acc,
double timeValue,
int numberOfAtoms)
{
int blockSize;
int gridSize;
#ifdef CUDA7
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize,
&blockSize,
(const void *) d_moveParticles,
0,
numberOfAtoms );
gridSize = (numberOfAtoms + blockSize - 1) / blockSize;
#else
int device;
cudaGetDevice ( &device );
int numSMs;
cudaDeviceGetAttribute(&numSMs,
cudaDevAttrMultiProcessorCount,
device);
gridSize = 256*numSMs;
blockSize = NUM_THREADS;
#endif
// std::cout << "gridsize = " << gridSize << " blocksize = " << blockSize << std::endl;
// Map OpenGL buffer object for writing from CUDA
double3 *d_pos = mapCUDAVBOd3(cudaPBOres);
d_moveParticles<<<gridSize,blockSize>>>(d_pos,
d_vel,
d_acc,
timeValue,
numberOfAtoms);
//Unmap buffer object
unmapCUDAVBO(cudaPBOres);
return;
}
__global__ void d_moveParticles(double3 *pos,
double3 *vel,
double3 *acc,
double dt,
int numberOfAtoms)
{
for (int atom = blockIdx.x * blockDim.x + threadIdx.x;
atom < numberOfAtoms;
atom += blockDim.x * gridDim.x)
{
double3 l_pos = pos[atom];
double3 l_vel = vel[atom];
double3 l_acc = acc[atom];
for (int i=0; i<30000; i++) {
velocityVerletUpdate(&l_pos,
&l_vel,
&l_acc,
dt);
}
pos[atom] = l_pos;
vel[atom] = l_vel;
acc[atom] = l_acc;
}
return;
}
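// Velocity Verlet step: half-kick v += a*dt/2, drift x += v*dt, recompute
// the acceleration at the new position, then apply the second half-kick.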
__device__ void velocityVerletUpdate(double3 *pos,
double3 *vel,
double3 *acc,
double dt)
{
vel[0] = updateVel(vel[0],
acc[0],
0.5*dt);
pos[0] = updatePos(pos[0],
vel[0],
dt);
acc[0] = updateAcc(pos[0]);
vel[0] = updateVel(vel[0],
acc[0],
0.5*dt);
return;
}
__device__ void symplecticEulerUpdate(double3 *pos,
double3 *vel,
double3 *acc,
double dt)
{
acc[0] = updateAcc(pos[0]);
vel[0] = updateVel(vel[0],
acc[0],
dt);
pos[0] = updatePos(pos[0],
vel[0],
dt);
}
__device__ double3 updateVel(double3 vel,
double3 acc,
double dt)
{
return vel + acc * dt;
}
__device__ double3 updatePos(double3 pos,
double3 vel,
double dt)
{
return pos + vel * dt;
}
__device__ double3 updateAcc(double3 pos)
{
return -0.5 * d_gs * d_muB * dabsB(pos) / d_mRb;
} |
5b33e0b10710c36fd6963e2a3bd6786c42e3adc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
An implementation of top-down splaying
D. Sleator <[email protected]>
March 1992
"Splay trees", or "self-adjusting search trees" are a simple and
efficient data structure for storing an ordered set. The data
structure consists of a binary tree, without parent pointers, and no
additional fields. It allows searching, insertion, deletion,
deletemin, deletemax, splitting, joining, and many other operations,
all with amortized logarithmic performance. Since the trees adapt to
the sequence of requests, their performance on real access patterns is
typically even better. Splay trees are described in a number of texts
and papers [1,2,3,4,5].
The code here is adapted from simple top-down splay, at the bottom of
page 669 of [3]. It can be obtained via anonymous ftp from
spade.pc.cs.cmu.edu in directory /usr/sleator/public.
The chief modification here is that the splay operation works even if the
item being splayed is not in the tree, and even if the tree root of the
tree is NULL. So the line:
t = splay(i, t);
causes it to search for item with key i in the tree rooted at t. If it's
there, it is splayed to the root. If it isn't there, then the node put
at the root is the last one before NULL that would have been reached in a
normal binary search for i. (It's a neighbor of i in the tree.) This
allows many other operations to be easily implemented, as shown below.
[1] "Fundamentals of data structures in C", Horowitz, Sahni,
and Anderson-Freed, Computer Science Press, pp 542-547.
[2] "Data Structures and Their Algorithms", Lewis and Denenberg,
Harper Collins, 1991, pp 243-251.
[3] "Self-adjusting Binary Search Trees" Sleator and Tarjan,
JACM Volume 32, No 3, July 1985, pp 652-686.
[4] "Data Structure and Algorithm Analysis", Mark Weiss,
Benjamin Cummins, 1992, pp 119-130.
[5] "Data Structures, Algorithms, and Performance", Derick Wood,
Addison-Wesley, 1993, pp 367-375.
*/
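// Illustrative sketch only (not part of this queue implementation): with
// top-down splay, find-min / delete-min reduce to a left walk plus a splay:
//     tree_node *t = root;
//     while (t->left != NULL) t = t->left;  // leftmost node holds the minimum
//     root = splay(t->item, root);          // splay it up to the root
// which is exactly the pattern used by queue_peek() and delete_min() below.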
#include "config.h"
#ifdef _LOCAL_SPLAY_QUEUE
#include "queue_item.h"
#include "queue.cuh"
#include "local_splay_queue.cuh"
#include <stdio.h>
#include <hipcub/hipcub.hpp>
static int num_threads_lps = min(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
static int num_blocks_lps = (NUM_LPS + num_threads_lps - 1) / num_threads_lps;
__device__ long global_min;
__device__ long lp_min_ts[NUM_LPS];
__device__ queue_item *fel;
__device__ int malloc_buf_pos[NUM_LPS];
__device__ int insert_count[NUM_LPS];
__device__ int size[NUM_LPS]; /* number of nodes in the tree */
/* Not actually needed for any of the operations */
// typedef struct tree_node Tree;
__device__ tree_node *splay_root[NUM_LPS];
__device__ tree_node *malloc_buf;
__device__ tree_node *malloc_()
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// printf("malloc for idx %d\n", idx);
for(int i = 0; i < MALLOC_BUF_SIZE; i++)
{
int offset = (i + malloc_buf_pos[idx]) % MALLOC_BUF_SIZE;
int abs_offset = idx * MALLOC_BUF_SIZE + offset;
#ifdef _PHOLD
if(malloc_buf[abs_offset].item.ts == -1)
#else
if(__half2float(malloc_buf[abs_offset].item.f) == -1.0)
#endif
{
malloc_buf_pos[idx] = (offset + 1) % MALLOC_BUF_SIZE;
return &malloc_buf[abs_offset];
}
}
return NULL;
}
__device__ void free_(tree_node *node)
{
#ifdef _PHOLD
node->item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
node->item = item;
#endif
}
__device__ tree_node *splay(queue_item i, tree_node * t)
{
/* Simple top down splay, not requiring i to be in the tree t. */
/* What it does is described above. */
tree_node N, *l, *r, *y;
if (t == NULL)
return t;
N.left = N.right = NULL;
l = r = &N;
for (;;) {
if (i <= t->item) {
if (t->left == NULL)
break;
if (i < t->left->item) {
y = t->left; /* rotate right */
t->left = y->right;
y->right = t;
t = y;
if (t->left == NULL)
break;
}
r->left = t; /* link right */
r = t;
t = t->left;
} else if (i > t->item) {
if (t->right == NULL)
break;
if (i > t->right->item) {
y = t->right; /* rotate left */
t->right = y->left;
y->left = t;
t = y;
if (t->right == NULL)
break;
}
l->right = t; /* link left */
l = t;
t = t->right;
} /* else {
break;
} */
}
l->right = t->left; /* assemble */
r->left = t->right;
t->left = N.right;
t->right = N.left;
return t;
}
__device__ tree_node *insert(queue_item i, tree_node * t)
{
/* Insert i into the tree t; duplicate keys are allowed and are linked to the left. */
/* Return a pointer to the resulting tree. */
int idx = threadIdx.x + blockIdx.x * blockDim.x;
tree_node *new_;
// new_ = (tree_node *) malloc(sizeof(tree_node));
new_ = (tree_node *) malloc_();
// printf("%d: new is %p\n", threadIdx.x, new_);
if (new_ == NULL) {
printf("Ran out of space\n");
return NULL;
}
new_->item = i;
if (t == NULL) {
new_->left = new_->right = NULL;
size[idx] = 1;
return new_;
}
t = splay(i, t);
if (i <= t->item) {
new_->left = t->left;
new_->right = t;
t->left = NULL;
size[idx]++;
return new_;
} else if (i > t->item) {
new_->right = t->right;
new_->left = t;
t->right = NULL;
size[idx]++;
return new_;
}
assert(false);
return NULL;
// else { /* We get here if it's already in the tree */
// /* Don't add it again */
// free_(new_);
// return t;
// }
}
__device__ int counted_size[NUM_LPS];
__device__ void dump_tree(tree_node *node)
{
if(node == splay_root[threadIdx.x])
counted_size[threadIdx.x] = 0;
if(node == NULL)
{
printf("NULL\n");
return;
}
counted_size[threadIdx.x]++;
//printf("%ld\n", node->item.ts);
//printf("%.2f\n", __half2float(node->item.f));
printf("left: ");
dump_tree(node->left);
printf("right: ");
dump_tree(node->right);
printf("up\n");
if(node == splay_root[threadIdx.x] && counted_size[threadIdx.x] != size[threadIdx.x])
printf("%d: size is wrong: %d vs %d\n", threadIdx.x, counted_size[threadIdx.x], size[threadIdx.x]);
}
__device__ tree_node *delete_min(queue_item * min, tree_node * root, int lp)
{
/* Deletes the minimum from the tree */
/* Return a pointer to the resulting tree. */
// printf("%d: root is %p\n", threadIdx.x + blockIdx.x * blockDim.x, root);
tree_node *x;
if (root == NULL)
return NULL;
tree_node *t = root;
while(t->left != NULL)
t = t->left;
*min = t->item;
/* if(!(threadIdx.x + blockIdx.x * blockDim.x))
{
printf("before:\n");
dump_tree(t);
} */
t = splay(t->item, root);
/* if(!(threadIdx.x + blockIdx.x * blockDim.x))
{
printf("after:\n");
dump_tree(t);
} */
if (t->left == NULL) {
// printf("%d: t->left is NULL, t->right is %p\n", threadIdx.x + blockIdx.x * blockDim.x, t->right);
x = t->right;
} else {
// printf("%d: t->left is not NULL, t->right is %p\n", threadIdx.x + blockIdx.x * blockDim.x, t->right);
x = splay(t->item, t->left);
x->right = t->right;
}
size[lp]--;
free_(t);
// printf("%d returning %p\n", threadIdx.x + blockIdx.x * blockDim.x, x);
return x;
}
__device__ bool local_splay_queue_insert(queue_item item)
{
int lp = get_lp(item.node);
int insert_pos = atomicAdd(&insert_count[lp], 1);
int index = lp * FEL_SIZE + insert_pos;
//if (VERBOSE_DEBUG) {
/* #ifdef _PHOLD
if(lp == 1)
printf("inserting item with ts %ld at insert pos %d, index %d\n", item.ts,
insert_pos, index);
#endif */
//}
fel[index] = item;
return true;
}
__global__ void local_splay_queue_init_d()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < NUM_LPS; idx += blockDim.x * gridDim.x)
{
splay_root[idx] = NULL;
for(int i = 0; i < MALLOC_BUF_SIZE; i++)
{
#ifdef _PHOLD
malloc_buf[idx * MALLOC_BUF_SIZE + i].item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
malloc_buf[idx * MALLOC_BUF_SIZE + i].item = item;
#endif
}
}
}
void local_splay_queue_init()
{
printf("\n\n-----------------------------------\n");
printf("[ LSQ ] Memory consumption\n");
printf("-----------------------------------\n");
printf(" available: %.2f MB\n", (float) DEVICE_MEMORY_MB);
printf(" enqueue buf: %d MB (%d items per enqueue buffer)\n", FEL_SIZE * ITEM_BYTES * NUM_LPS / 1024 / 1024, FEL_SIZE);
printf(" malloc buf: %d MB (%d items per malloc buffer)\n", MALLOC_BUF_SIZE * NUM_LPS * sizeof(tree_node) / 1024 / 1024, MALLOC_BUF_SIZE);
printf("-----------------------------------\n\n");
queue_item *h_fel;
CudaSafeCall( hipMalloc(&h_fel, ITEM_BYTES * FEL_SIZE * NUM_NODES) );
CudaSafeCall( hipMemcpyToSymbol(fel, &h_fel, sizeof(fel)) );
tree_node *h_malloc_buf;
CudaSafeCall( hipMalloc(&h_malloc_buf, sizeof(tree_node) * MALLOC_BUF_SIZE * NUM_NODES) );
CudaSafeCall( hipMemcpyToSymbol(malloc_buf, &h_malloc_buf, sizeof(malloc_buf)) );
hipLaunchKernelGGL(( local_splay_queue_init_d), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
hipDeviceSynchronize();
}
__device__ int queue_peek(queue_item **item, int lp)
{
// return and splay min
tree_node *root = splay_root[lp];
if (root == NULL)
{
*item = NULL;
return -1;
}
tree_node *t = root;
while(t->left != NULL)
t = t->left;
#ifdef _PHOLD
if(t->item.ts >= global_min + LOOKAHEAD)
{
*item = NULL;
return -1;
}
#endif
*item = &(t->item);
t = splay(t->item, root);
splay_root[lp] = t;
return lp;
}
__device__ void local_splay_queue_set_done(int index)
{
int lp = index;
tree_node *root = splay_root[lp];
queue_item item;
splay_root[lp] = delete_min(&item, root, lp);
// dump_tree(splay_root[lp]);
}
void local_splay_queue_finish()
{
}
#ifdef _PHOLD
static __global__ void find_min_ts_device_pre()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
queue_item *item;
queue_peek(&item, idx);
if (item != NULL) {
lp_min_ts[idx] = item->ts;
} else {
lp_min_ts[idx] = LONG_MAX;
}
}
}
#endif
#ifdef _PHOLD
__device__ void *temp_storage = NULL;
__device__ size_t temp_storage_bytes = 0;
static __global__ void find_min_ts_device()
{
if(!temp_storage)
{
hipcub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
CudaSafeCall( hipMalloc(&temp_storage, temp_storage_bytes) );
}
hipcub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
// CudaSafeCall( hipFree(temp_storage) );
}
#endif
#ifdef _PHOLD
long local_splay_queue_get_min_ts()
{
long dummy_ts = LONG_MAX - LOOKAHEAD;
CudaSafeCall( hipMemcpyToSymbol(global_min, &dummy_ts, sizeof(long)) );
hipLaunchKernelGGL(( find_min_ts_device_pre), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
hipLaunchKernelGGL(( find_min_ts_device), dim3(1), dim3(1), 0, 0, );
CudaCheckError();
long min;
CudaSafeCall( hipMemcpyFromSymbol(&min, global_min, sizeof(long)) );
return min;
}
#endif
__global__ void insert_bulk()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
tree_node *root = splay_root[idx];
for(int i = 0; i < insert_count[idx]; i++)
{
root = insert(fel[idx * FEL_SIZE + i], root);
}
splay_root[idx] = root;
insert_count[idx] = 0;
}
}
__device__ void clear_tree(int lp, tree_node *node)
{
if(node == NULL)
{
return;
}
clear_tree(lp, node->left);
clear_tree(lp, node->right);
free_(node);
}
__global__ void queue_clear_(int lp)
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < MALLOC_BUF_SIZE;
idx += blockDim.x * gridDim.x)
{
#ifdef _PHOLD
malloc_buf[lp * MALLOC_BUF_SIZE + idx].item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
malloc_buf[lp * MALLOC_BUF_SIZE + idx].item = item;
#endif
}
}
__device__ void queue_clear(int lp)
{
hipLaunchKernelGGL(( queue_clear_), dim3(MALLOC_BUF_SIZE / 256 > 0 ? MALLOC_BUF_SIZE / 256 : 1), dim3(256), 0, 0, lp);
splay_root[lp] = NULL;
size[lp] = 0;
insert_count[lp] = 0;
}
void local_splay_queue_pre()
{
}
void local_splay_queue_post()
{
hipLaunchKernelGGL(( insert_bulk), dim3(num_blocks_lps), dim3(num_threads_lps), 0, 0, );
CudaCheckError();
}
void local_splay_queue_post_init()
{
local_splay_queue_post();
}
__device__ bool queue_is_empty(int lp)
{
return size[lp] + insert_count[lp] == 0;
}
__device__ int queue_length(int lp)
{
return size[lp];
}
__device__ void queue_insert_or_update(queue_item item, int lp)
{
queue_insert(item);
}
#endif
| 5b33e0b10710c36fd6963e2a3bd6786c42e3adc5.cu | /*
An implementation of top-down splaying
D. Sleator <[email protected]>
March 1992
"Splay trees", or "self-adjusting search trees" are a simple and
efficient data structure for storing an ordered set. The data
structure consists of a binary tree, without parent pointers, and no
additional fields. It allows searching, insertion, deletion,
deletemin, deletemax, splitting, joining, and many other operations,
all with amortized logarithmic performance. Since the trees adapt to
the sequence of requests, their performance on real access patterns is
typically even better. Splay trees are described in a number of texts
and papers [1,2,3,4,5].
The code here is adapted from simple top-down splay, at the bottom of
page 669 of [3]. It can be obtained via anonymous ftp from
spade.pc.cs.cmu.edu in directory /usr/sleator/public.
The chief modification here is that the splay operation works even if the
item being splayed is not in the tree, and even if the tree root of the
tree is NULL. So the line:
t = splay(i, t);
causes it to search for item with key i in the tree rooted at t. If it's
there, it is splayed to the root. If it isn't there, then the node put
at the root is the last one before NULL that would have been reached in a
normal binary search for i. (It's a neighbor of i in the tree.) This
allows many other operations to be easily implemented, as shown below.
[1] "Fundamentals of data structures in C", Horowitz, Sahni,
and Anderson-Freed, Computer Science Press, pp 542-547.
[2] "Data Structures and Their Algorithms", Lewis and Denenberg,
Harper Collins, 1991, pp 243-251.
[3] "Self-adjusting Binary Search Trees" Sleator and Tarjan,
JACM Volume 32, No 3, July 1985, pp 652-686.
[4] "Data Structure and Algorithm Analysis", Mark Weiss,
Benjamin Cummins, 1992, pp 119-130.
[5] "Data Structures, Algorithms, and Performance", Derick Wood,
Addison-Wesley, 1993, pp 367-375.
*/
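// Illustrative sketch only (not part of this queue implementation): with
// top-down splay, find-min / delete-min reduce to a left walk plus a splay:
//     tree_node *t = root;
//     while (t->left != NULL) t = t->left;  // leftmost node holds the minimum
//     root = splay(t->item, root);          // splay it up to the root
// which is exactly the pattern used by queue_peek() and delete_min() below.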
#include "config.h"
#ifdef _LOCAL_SPLAY_QUEUE
#include "queue_item.h"
#include "queue.cuh"
#include "local_splay_queue.cuh"
#include <stdio.h>
#include <cub/cub.cuh>
static int num_threads_lps = min(MAX_THREADS__LOCAL_ARRAY_QUEUE, NUM_LPS);
static int num_blocks_lps = (NUM_LPS + num_threads_lps - 1) / num_threads_lps;
__device__ long global_min;
__device__ long lp_min_ts[NUM_LPS];
__device__ queue_item *fel;
__device__ int malloc_buf_pos[NUM_LPS];
__device__ int insert_count[NUM_LPS];
__device__ int size[NUM_LPS]; /* number of nodes in the tree */
/* Not actually needed for any of the operations */
// typedef struct tree_node Tree;
__device__ tree_node *splay_root[NUM_LPS];
__device__ tree_node *malloc_buf;
__device__ tree_node *malloc_()
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// printf("malloc for idx %d\n", idx);
for(int i = 0; i < MALLOC_BUF_SIZE; i++)
{
int offset = (i + malloc_buf_pos[idx]) % MALLOC_BUF_SIZE;
int abs_offset = idx * MALLOC_BUF_SIZE + offset;
#ifdef _PHOLD
if(malloc_buf[abs_offset].item.ts == -1)
#else
if(__half2float(malloc_buf[abs_offset].item.f) == -1.0)
#endif
{
malloc_buf_pos[idx] = (offset + 1) % MALLOC_BUF_SIZE;
return &malloc_buf[abs_offset];
}
}
return NULL;
}
__device__ void free_(tree_node *node)
{
#ifdef _PHOLD
node->item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
node->item = item;
#endif
}
__device__ tree_node *splay(queue_item i, tree_node * t)
{
/* Simple top down splay, not requiring i to be in the tree t. */
/* What it does is described above. */
tree_node N, *l, *r, *y;
if (t == NULL)
return t;
N.left = N.right = NULL;
l = r = &N;
for (;;) {
if (i <= t->item) {
if (t->left == NULL)
break;
if (i < t->left->item) {
y = t->left; /* rotate right */
t->left = y->right;
y->right = t;
t = y;
if (t->left == NULL)
break;
}
r->left = t; /* link right */
r = t;
t = t->left;
} else if (i > t->item) {
if (t->right == NULL)
break;
if (i > t->right->item) {
y = t->right; /* rotate left */
t->right = y->left;
y->left = t;
t = y;
if (t->right == NULL)
break;
}
l->right = t; /* link left */
l = t;
t = t->right;
} /* else {
break;
} */
}
l->right = t->left; /* assemble */
r->left = t->right;
t->left = N.right;
t->right = N.left;
return t;
}
__device__ tree_node *insert(queue_item i, tree_node * t)
{
/* Insert i into the tree t; duplicate keys are allowed and are linked to the left. */
/* Return a pointer to the resulting tree. */
int idx = threadIdx.x + blockIdx.x * blockDim.x;
tree_node *new_;
// new_ = (tree_node *) malloc(sizeof(tree_node));
new_ = (tree_node *) malloc_();
// printf("%d: new is %p\n", threadIdx.x, new_);
if (new_ == NULL) {
printf("Ran out of space\n");
return NULL;
}
new_->item = i;
if (t == NULL) {
new_->left = new_->right = NULL;
size[idx] = 1;
return new_;
}
t = splay(i, t);
if (i <= t->item) {
new_->left = t->left;
new_->right = t;
t->left = NULL;
size[idx]++;
return new_;
} else if (i > t->item) {
new_->right = t->right;
new_->left = t;
t->right = NULL;
size[idx]++;
return new_;
}
assert(false);
return NULL;
// else { /* We get here if it's already in the tree */
// /* Don't add it again */
// free_(new_);
// return t;
// }
}
__device__ int counted_size[NUM_LPS];
__device__ void dump_tree(tree_node *node)
{
if(node == splay_root[threadIdx.x])
counted_size[threadIdx.x] = 0;
if(node == NULL)
{
printf("NULL\n");
return;
}
counted_size[threadIdx.x]++;
//printf("%ld\n", node->item.ts);
//printf("%.2f\n", __half2float(node->item.f));
printf("left: ");
dump_tree(node->left);
printf("right: ");
dump_tree(node->right);
printf("up\n");
if(node == splay_root[threadIdx.x] && counted_size[threadIdx.x] != size[threadIdx.x])
printf("%d: size is wrong: %d vs %d\n", threadIdx.x, counted_size[threadIdx.x], size[threadIdx.x]);
}
__device__ tree_node *delete_min(queue_item * min, tree_node * root, int lp)
{
/* Deletes the minimum from the tree */
/* Return a pointer to the resulting tree. */
// printf("%d: root is %p\n", threadIdx.x + blockIdx.x * blockDim.x, root);
tree_node *x;
if (root == NULL)
return NULL;
tree_node *t = root;
while(t->left != NULL)
t = t->left;
*min = t->item;
/* if(!(threadIdx.x + blockIdx.x * blockDim.x))
{
printf("before:\n");
dump_tree(t);
} */
t = splay(t->item, root);
/* if(!(threadIdx.x + blockIdx.x * blockDim.x))
{
printf("after:\n");
dump_tree(t);
} */
if (t->left == NULL) {
// printf("%d: t->left is NULL, t->right is %p\n", threadIdx.x + blockIdx.x * blockDim.x, t->right);
x = t->right;
} else {
// printf("%d: t->left is not NULL, t->right is %p\n", threadIdx.x + blockIdx.x * blockDim.x, t->right);
x = splay(t->item, t->left);
x->right = t->right;
}
size[lp]--;
free_(t);
// printf("%d returning %p\n", threadIdx.x + blockIdx.x * blockDim.x, x);
return x;
}
__device__ bool local_splay_queue_insert(queue_item item)
{
int lp = get_lp(item.node);
int insert_pos = atomicAdd(&insert_count[lp], 1);
int index = lp * FEL_SIZE + insert_pos;
//if (VERBOSE_DEBUG) {
/* #ifdef _PHOLD
if(lp == 1)
printf("inserting item with ts %ld at insert pos %d, index %d\n", item.ts,
insert_pos, index);
#endif */
//}
fel[index] = item;
return true;
}
__global__ void local_splay_queue_init_d()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < NUM_LPS; idx += blockDim.x * gridDim.x)
{
splay_root[idx] = NULL;
for(int i = 0; i < MALLOC_BUF_SIZE; i++)
{
#ifdef _PHOLD
malloc_buf[idx * MALLOC_BUF_SIZE + i].item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
malloc_buf[idx * MALLOC_BUF_SIZE + i].item = item;
#endif
}
}
}
void local_splay_queue_init()
{
printf("\n\n-----------------------------------\n");
printf("[ LSQ ] Memory consumption\n");
printf("-----------------------------------\n");
printf(" available: %.2f MB\n", (float) DEVICE_MEMORY_MB);
printf(" enqueue buf: %d MB (%d items per enqueue buffer)\n", FEL_SIZE * ITEM_BYTES * NUM_LPS / 1024 / 1024, FEL_SIZE);
printf(" malloc buf: %d MB (%d items per malloc buffer)\n", MALLOC_BUF_SIZE * NUM_LPS * sizeof(tree_node) / 1024 / 1024, MALLOC_BUF_SIZE);
printf("-----------------------------------\n\n");
queue_item *h_fel;
CudaSafeCall( cudaMalloc(&h_fel, ITEM_BYTES * FEL_SIZE * NUM_NODES) );
CudaSafeCall( cudaMemcpyToSymbol(fel, &h_fel, sizeof(fel)) );
tree_node *h_malloc_buf;
CudaSafeCall( cudaMalloc(&h_malloc_buf, sizeof(tree_node) * MALLOC_BUF_SIZE * NUM_NODES) );
CudaSafeCall( cudaMemcpyToSymbol(malloc_buf, &h_malloc_buf, sizeof(malloc_buf)) );
local_splay_queue_init_d<<<num_blocks_lps, num_threads_lps>>>();
cudaDeviceSynchronize();
}
__device__ int queue_peek(queue_item **item, int lp)
{
// return and splay min
tree_node *root = splay_root[lp];
if (root == NULL)
{
*item = NULL;
return -1;
}
tree_node *t = root;
while(t->left != NULL)
t = t->left;
#ifdef _PHOLD
if(t->item.ts >= global_min + LOOKAHEAD)
{
*item = NULL;
return -1;
}
#endif
*item = &(t->item);
t = splay(t->item, root);
splay_root[lp] = t;
return lp;
}
__device__ void local_splay_queue_set_done(int index)
{
int lp = index;
tree_node *root = splay_root[lp];
queue_item item;
splay_root[lp] = delete_min(&item, root, lp);
// dump_tree(splay_root[lp]);
}
void local_splay_queue_finish()
{
}
#ifdef _PHOLD
static __global__ void find_min_ts_device_pre()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
queue_item *item;
queue_peek(&item, idx);
if (item != NULL) {
lp_min_ts[idx] = item->ts;
} else {
lp_min_ts[idx] = LONG_MAX;
}
}
}
#endif
#ifdef _PHOLD
__device__ void *temp_storage = NULL;
__device__ size_t temp_storage_bytes = 0;
static __global__ void find_min_ts_device()
{
if(!temp_storage)
{
cub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
CudaSafeCall( cudaMalloc(&temp_storage, temp_storage_bytes) );
}
cub::DeviceReduce::Min(temp_storage, temp_storage_bytes, lp_min_ts,
&global_min, NUM_LPS);
// CudaSafeCall( cudaFree(temp_storage) );
}
#endif
#ifdef _PHOLD
long local_splay_queue_get_min_ts()
{
long dummy_ts = LONG_MAX - LOOKAHEAD;
CudaSafeCall( cudaMemcpyToSymbol(global_min, &dummy_ts, sizeof(long)) );
find_min_ts_device_pre<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
find_min_ts_device<<<1, 1>>>();
CudaCheckError();
long min;
CudaSafeCall( cudaMemcpyFromSymbol(&min, global_min, sizeof(long)) );
return min;
}
#endif
__global__ void insert_bulk()
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < NUM_LPS;
idx += blockDim.x * gridDim.x) {
tree_node *root = splay_root[idx];
for(int i = 0; i < insert_count[idx]; i++)
{
root = insert(fel[idx * FEL_SIZE + i], root);
}
splay_root[idx] = root;
insert_count[idx] = 0;
}
}
__device__ void clear_tree(int lp, tree_node *node)
{
if(node == NULL)
{
return;
}
clear_tree(lp, node->left);
clear_tree(lp, node->right);
free_(node);
}
__global__ void queue_clear_(int lp)
{
for (int idx = threadIdx.x + blockDim.x * blockIdx.x;
idx < MALLOC_BUF_SIZE;
idx += blockDim.x * gridDim.x)
{
#ifdef _PHOLD
malloc_buf[lp * MALLOC_BUF_SIZE + idx].item.ts = -1;
#else
queue_item item;
item.f = __float2half(-1.0);
malloc_buf[lp * MALLOC_BUF_SIZE + idx].item = item;
#endif
}
}
__device__ void queue_clear(int lp)
{
queue_clear_<<<MALLOC_BUF_SIZE / 256 > 0 ? MALLOC_BUF_SIZE / 256 : 1, 256>>>(lp);
splay_root[lp] = NULL;
size[lp] = 0;
insert_count[lp] = 0;
}
void local_splay_queue_pre()
{
}
void local_splay_queue_post()
{
insert_bulk<<<num_blocks_lps, num_threads_lps>>>();
CudaCheckError();
}
void local_splay_queue_post_init()
{
local_splay_queue_post();
}
__device__ bool queue_is_empty(int lp)
{
return size[lp] + insert_count[lp] == 0;
}
__device__ int queue_length(int lp)
{
return size[lp];
}
__device__ void queue_insert_or_update(queue_item item, int lp)
{
queue_insert(item);
}
#endif
|
276199cd54cdf137cc4b2cd11aabcd7296375a45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#define PG_SIZE (4096)
#define BYTE_SIZE (256)
#define BLOCK_SIZE 32
// shared memory shared by each block
__global__
void entropy(int data_size, char *data_old, char *data_new, double* ent)
{
//dynamic shared memory
int gthread_idx = blockIdx.x*blockDim.x + threadIdx.x;
// int lthread_idx = threadIdx.x;
int size = PG_SIZE; // size of the data region handled by each thread
int gbuf_idx = gthread_idx * size;
int i;
//init shared variable
//int ent_cnt=0;
unsigned short ent_cnt[BYTE_SIZE]={0,};
double ent_v = 0.0, Pb;
for(i=gbuf_idx; i<gbuf_idx+size && i<data_size; i++)
{
//similarity calculation
//sim_cnt += (data_old[i] == data_new[i]);
ent_cnt[(unsigned char) data_new[i]]++; // cast: plain char may be signed
}
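// Shannon entropy of the page: H = -sum_b P(b) * log2(P(b)), where P(b) is
// the relative frequency of byte value b within the PG_SIZE-byte page.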
for(i=0; i<BYTE_SIZE; i++)
{
Pb = ((double)ent_cnt[i]) / PG_SIZE;
//Pb = 16.0/PG_SIZE;//1.0/256;
if(Pb!=0) ent_v += Pb * (-log2(Pb));
// printf("%lf, %lf\n", ent_v,Pb);
}
ent[gthread_idx] = ent_v;
//sim[gthread_idx] = sim_cnt;
//printf("%d %d\n", lthread_idx, sim_cnt);
}
int main(int argc, char *argv[])
{
int data_size = 1<<30;//1<<30; //1GB
char *data_old, *data_new;
char *d_do, *d_dn;
int num_threads;
int num_blocks;
double *ent, *d_ent;
double start, finish;
//int i;
// initialize data
data_old = (char*)malloc(data_size*sizeof(char)); //1GB
data_new = (char*)malloc(data_size*sizeof(char)); //1GB
ent = (double*)malloc((data_size/PG_SIZE)*sizeof(double)); //256K*12B = 3KB
// allocate device data
hipMalloc(&d_do, data_size*sizeof(char)); //1GB
hipMalloc(&d_dn, data_size*sizeof(char)); //1GB
hipMalloc(&d_ent, (data_size/PG_SIZE) * sizeof(double)); //3KB
// initialize data
for (int i = 0; i < data_size; i++) {
data_old[i] = 1;
data_new[i] = 1;
}
//printf("Start Evaluation\n");
//printf("1. memcpy\n");
GET_TIME(start);
hipMemcpy(d_do, data_old, data_size*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(d_dn, data_new, data_size*sizeof(char), hipMemcpyHostToDevice);
//block size setting (adjustable)
//num_threads = 32;
if(argc!=2)
{
printf("Input Error\n");
return -1;
}
num_threads = atoi(argv[1]);
num_blocks = data_size/PG_SIZE/num_threads; //1GB/32?? (does not really matter)
printf("nthread, nblock: %d %d\n", num_threads, num_blocks);
// Perform sim_ent function on 1GB elements
//the 3rd argument is the shared memory size = 516Bytes * 32 = 16KB (L1cache size 48KB)
hipLaunchKernelGGL(( entropy), dim3(num_blocks), dim3(num_threads), 0, 0, data_size, d_do, d_dn, d_ent);
hipMemcpy(ent, d_ent, (data_size/PG_SIZE) * sizeof(double), hipMemcpyDeviceToHost);
//for(int i=0; i<data_size/PG_SIZE; i++)
// printf("ent: %lf\n", ent[i]);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
hipFree(d_do);
hipFree(d_dn);
hipFree(d_ent);
free(data_old);
free(data_new);
free(ent);
} | 276199cd54cdf137cc4b2cd11aabcd7296375a45.cu | #include <stdio.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#define PG_SIZE (4096)
#define BYTE_SIZE (256)
#define BLOCK_SIZE 32
// shared memory shared by each block
__global__
void entropy(int data_size, char *data_old, char *data_new, double* ent)
{
//dynamic shared memory
int gthread_idx = blockIdx.x*blockDim.x + threadIdx.x;
// int lthread_idx = threadIdx.x;
int size = PG_SIZE; // size of the data region handled by each thread
int gbuf_idx = gthread_idx * size;
int i;
//init shared variable
//int ent_cnt=0;
unsigned short ent_cnt[BYTE_SIZE]={0,};
double ent_v = 0.0, Pb;
for(i=gbuf_idx; i<gbuf_idx+size && i<data_size; i++)
{
//similarity calculation
//sim_cnt += (data_old[i] == data_new[i]);
ent_cnt[(unsigned char) data_new[i]]++; // cast: plain char may be signed
}
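// Shannon entropy of the page: H = -sum_b P(b) * log2(P(b)), where P(b) is
// the relative frequency of byte value b within the PG_SIZE-byte page.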
for(i=0; i<BYTE_SIZE; i++)
{
Pb = ((double)ent_cnt[i]) / PG_SIZE;
//Pb = 16.0/PG_SIZE;//1.0/256;
if(Pb!=0) ent_v += Pb * (-log2(Pb));
// printf("%lf, %lf\n", ent_v,Pb);
}
ent[gthread_idx] = ent_v;
//sim[gthread_idx] = sim_cnt;
//printf("%d %d\n", lthread_idx, sim_cnt);
}
int main(int argc, char *argv[])
{
int data_size = 1<<30;//1<<30; //1GB
char *data_old, *data_new;
char *d_do, *d_dn;
int num_threads;
int num_blocks;
double *ent, *d_ent;
double start, finish;
//int i;
// initialize data
data_old = (char*)malloc(data_size*sizeof(char)); //1GB
data_new = (char*)malloc(data_size*sizeof(char)); //1GB
ent = (double*)malloc((data_size/PG_SIZE)*sizeof(double)); //256K*12B = 3KB
// allocate device data
cudaMalloc(&d_do, data_size*sizeof(char)); //1GB
cudaMalloc(&d_dn, data_size*sizeof(char)); //1GB
cudaMalloc(&d_ent, (data_size/PG_SIZE) * sizeof(double)); //3KB
// initialize data
for (int i = 0; i < data_size; i++) {
data_old[i] = 1;
data_new[i] = 1;
}
//printf("Start Evaluation\n");
//printf("1. memcpy\n");
GET_TIME(start);
cudaMemcpy(d_do, data_old, data_size*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_dn, data_new, data_size*sizeof(char), cudaMemcpyHostToDevice);
//block size setting (adjustable)
//num_threads = 32;
if(argc!=2)
{
printf("Input Error\n");
return -1;
}
num_threads = atoi(argv[1]);
num_blocks = data_size/PG_SIZE/num_threads; //1GB/32?? (does not really matter)
printf("nthread, nblock: %d %d\n", num_threads, num_blocks);
// Perform sim_ent function on 1GB elements
//the 3rd argument is the shared memory size = 516Bytes * 32 = 16KB (L1cache size 48KB)
entropy<<<num_blocks, num_threads>>>(data_size, d_do, d_dn, d_ent);
cudaMemcpy(ent, d_ent, (data_size/PG_SIZE) * sizeof(double), cudaMemcpyDeviceToHost);
//for(int i=0; i<data_size/PG_SIZE; i++)
// printf("ent: %lf\n", ent[i]);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
cudaFree(d_do);
cudaFree(d_dn);
cudaFree(d_ent);
free(data_old);
free(data_new);
free(ent);
} |
61aae23c06c0673593b7084f05aadd02b8722103.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/sequential/device_vector.cuh>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
__global__ void kernel(BCL::cuda::device_vector<int> vec) {
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
int value = vec.data()[tid];
printf("%lu: %d\n", tid, value);
}
int main(int argc, char** argv) {
hipSetDevice(0);
constexpr size_t n = 16;
std::vector<int> vec(n);
for (size_t i = 0; i < vec.size(); i++) {
vec[i] = i;
}
BCL::cuda::device_vector<int> v(vec.begin(), vec.end());
printf("First Launch (should be in order 0 -> n)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
int val = v[tid];
printf("Element %lu == %d\n", tid, val);
}, v);
hipDeviceSynchronize();
fflush(stdout);
printf("Second Launch (modifying values)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
v[tid] = v.size() - tid;
}, v);
hipDeviceSynchronize();
fflush(stdout);
printf("Third Launch (should be in order n -> 1)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
int val = v[tid];
printf("Element %lu == %d\n", tid, val);
}, v);
hipDeviceSynchronize();
fflush(stdout);
v.destroy();
return 0;
}
| 61aae23c06c0673593b7084f05aadd02b8722103.cu | #include <cuda.h>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/sequential/device_vector.cuh>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
__global__ void kernel(BCL::cuda::device_vector<int> vec) {
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
int value = vec.data()[tid];
printf("%lu: %d\n", tid, value);
}
int main(int argc, char** argv) {
cudaSetDevice(0);
constexpr size_t n = 16;
std::vector<int> vec(n);
for (size_t i = 0; i < vec.size(); i++) {
vec[i] = i;
}
BCL::cuda::device_vector<int> v(vec.begin(), vec.end());
printf("First Launch (should be in order 0 -> n)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
int val = v[tid];
printf("Element %lu == %d\n", tid, val);
}, v);
cudaDeviceSynchronize();
fflush(stdout);
printf("Second Launch (modifying values)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
v[tid] = v.size() - tid;
}, v);
cudaDeviceSynchronize();
fflush(stdout);
printf("Third Launch (should be in order n -> 1)\n");
fflush(stdout);
BCL::cuda::launch(v.size(),
[] __device__ (size_t tid, BCL::cuda::device_vector<int> v) {
int val = v[tid];
printf("Element %lu == %d\n", tid, val);
}, v);
cudaDeviceSynchronize();
fflush(stdout);
v.destroy();
return 0;
}
|
1849501f245aa9f47bd4538f7a2ce09c5dc598a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator, typename Compare, typename Iterator2>
__global__
void sort_kernel(Iterator first, Iterator last, Compare comp, Iterator2 is_supported)
{
#if (__CUDA_ARCH__ >= 200)
*is_supported = true;
thrust::sort(thrust::seq, first, last, comp);
#else
*is_supported = false;
#endif
}
template<typename T>
struct TestSortDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<bool> is_supported(1);
hipLaunchKernelGGL(( sort_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), thrust::less<T>(), is_supported.begin());
if(is_supported[0])
{
thrust::sort(h_data.begin(), h_data.end(), thrust::less<T>());
ASSERT_EQUAL(h_data, d_data);
}
}
};
VariableUnitTest<
TestSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceSeqInstance;
| 1849501f245aa9f47bd4538f7a2ce09c5dc598a6.cu | #include <unittest/unittest.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator, typename Compare, typename Iterator2>
__global__
void sort_kernel(Iterator first, Iterator last, Compare comp, Iterator2 is_supported)
{
#if (__CUDA_ARCH__ >= 200)
*is_supported = true;
thrust::sort(thrust::seq, first, last, comp);
#else
*is_supported = false;
#endif
}
template<typename T>
struct TestSortDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<bool> is_supported(1);
sort_kernel<<<1,1>>>(d_data.begin(), d_data.end(), thrust::less<T>(), is_supported.begin());
if(is_supported[0])
{
thrust::sort(h_data.begin(), h_data.end(), thrust::less<T>());
ASSERT_EQUAL(h_data, d_data);
}
}
};
VariableUnitTest<
TestSortDeviceSeq,
unittest::type_list<unittest::int8_t,unittest::int32_t>
> TestSortDeviceSeqInstance;
|
1846eab2aeb17d8f24a9285d602d4d28900b5af7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_minus_4_top [3][2];
static int dims_update_halo_kernel2_yvel_minus_4_top_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_minus_4_top_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = -yvel0(0,-4,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = -yvel1(0,-4,0);
}
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[0][0] * dims_update_halo_kernel2_yvel_minus_4_top[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[1][0] * dims_update_halo_kernel2_yvel_minus_4_top[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_minus_4_top[0][0], dims_update_halo_kernel2_yvel_minus_4_top[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_minus_4_top[1][0], dims_update_halo_kernel2_yvel_minus_4_top[1][1], arg1);
update_halo_kernel2_yvel_minus_4_top_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,37)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(37,"update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[37].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_minus_4_top_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_minus_4_top_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_minus_4_top_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_minus_4_top_h[1][1]) {
dims_update_halo_kernel2_yvel_minus_4_top_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_minus_4_top_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_minus_4_top_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_minus_4_top_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_minus_4_top, dims_update_halo_kernel2_yvel_minus_4_top_h, sizeof(dims_update_halo_kernel2_yvel_minus_4_top)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[37].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_4_top), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[37].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[37].mpi_time += t2-t1;
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 37;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 37;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(37,"update_halo_kernel2_yvel_minus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
| 1846eab2aeb17d8f24a9285d602d4d28900b5af7.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_minus_4_top [3][2];
static int dims_update_halo_kernel2_yvel_minus_4_top_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_minus_4_top_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = -yvel0(0,-4,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = -yvel1(0,-4,0);
}
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[0][0] * dims_update_halo_kernel2_yvel_minus_4_top[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_minus_4_top[1][0] * dims_update_halo_kernel2_yvel_minus_4_top[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_minus_4_top[0][0], dims_update_halo_kernel2_yvel_minus_4_top[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_minus_4_top[1][0], dims_update_halo_kernel2_yvel_minus_4_top[1][1], arg1);
update_halo_kernel2_yvel_minus_4_top_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,37)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(37,"update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[37].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_minus_4_top_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_minus_4_top_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_minus_4_top_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_minus_4_top_h[1][1]) {
dims_update_halo_kernel2_yvel_minus_4_top_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_minus_4_top_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_minus_4_top_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_minus_4_top_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_minus_4_top, dims_update_halo_kernel2_yvel_minus_4_top_h, sizeof(dims_update_halo_kernel2_yvel_minus_4_top)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[37].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_minus_4_top<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[37].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[37].mpi_time += t2-t1;
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 37;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 37;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(37,"update_halo_kernel2_yvel_minus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
0758b3416eb186c1489f6619e90b0d3c2a5ce6db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void tissueGPU3Kernel(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000, int nnt, int nnv, int is2d, float req, float r2d)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jvp,nnv2=2*nnv;
float p = 0., xt,yt,zt,x,y,z,dist2,gtv,req2=req*req,r2d2=r2d*r2d;
if(itp < nnt){
xt = d_tissxyz[itp];
yt = d_tissxyz[itp+nnt];
zt = d_tissxyz[itp+nnt*2];
for(jvp=0; jvp<nnv; jvp++){
x = d_vessxyz[jvp] - xt;
y = d_vessxyz[jvp+nnv] - yt;
z = d_vessxyz[jvp+nnv2] - zt;
dist2 = x*x + y*y + z*z;
if(dist2 < req2){
if(is2d) gtv = log(r2d2/req2) + 1. - dist2/req2;
else gtv = (1.5 - 0.5*dist2/req2)/req;
}
else{
if(is2d) gtv = log(r2d2/dist2);
else gtv = 1./sqrt(dist2);
}
p += d_qv000[jvp]*gtv;
}
d_pt000[itp] = p;
}
} | 0758b3416eb186c1489f6619e90b0d3c2a5ce6db.cu | #include "includes.h"
__global__ void tissueGPU3Kernel(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000, int nnt, int nnv, int is2d, float req, float r2d)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jvp,nnv2=2*nnv;
float p = 0., xt,yt,zt,x,y,z,dist2,gtv,req2=req*req,r2d2=r2d*r2d;
if(itp < nnt){
xt = d_tissxyz[itp];
yt = d_tissxyz[itp+nnt];
zt = d_tissxyz[itp+nnt*2];
for(jvp=0; jvp<nnv; jvp++){
x = d_vessxyz[jvp] - xt;
y = d_vessxyz[jvp+nnv] - yt;
z = d_vessxyz[jvp+nnv2] - zt;
dist2 = x*x + y*y + z*z;
if(dist2 < req2){
if(is2d) gtv = log(r2d2/req2) + 1. - dist2/req2;
else gtv = (1.5 - 0.5*dist2/req2)/req;
}
else{
if(is2d) gtv = log(r2d2/dist2);
else gtv = 1./sqrt(dist2);
}
p += d_qv000[jvp]*gtv;
}
d_pt000[itp] = p;
}
} |
7e35d6fb348ef993e82f2bb04592aa4bb99d96c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *temperature = NULL;
hipMalloc(&temperature, XSIZE*YSIZE);
int block_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,block_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,block_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,block_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e35d6fb348ef993e82f2bb04592aa4bb99d96c7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *temperature = NULL;
cudaMalloc(&temperature, XSIZE*YSIZE);
int block_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
initKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
initKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
initKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2fd9bd23cda3f5da83fa06cb538ae5de6af66021.hip | // !!! This is a file automatically generated by hipify!!!
#pragma warning(disable: 4819)
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include "visible_check.cuh"
using Vec3 = float[3];
__constant__ float c_dev[3];
__global__
void kernel_func_gpu(const float* const __restrict__ cloud, char* const __restrict__ is_visible, const float lambda_sqrd, const int i, const int offset)
{
const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + offset;
const float* const p = &cloud[3 * i];
const float* const q = &cloud[3 * j];
if (is_visible[i] == 0) return;
if (j <= i) return;
if (is_visible[j] == 0) return;
Vec3 v;
v[0] = p[0] - c_dev[0];
v[1] = p[1] - c_dev[1];
v[2] = p[2] - c_dev[2];
const float v_sqrd = v[0] * v[0] + v[1] * v[1] + v[2] * v[2];
Vec3 ta;
ta[0] = q[0] - c_dev[0];
ta[1] = q[1] - c_dev[1];
ta[2] = q[2] - c_dev[2];
const float tb = v[0] * ta[0] + v[1] * ta[1] + v[2] * ta[2];
const float k = tb / v_sqrd;
Vec3 tc;
tc[0] = ta[0] - k * v[0];
tc[1] = ta[1] - k * v[1];
tc[2] = ta[2] - k * v[2];
const float r_sqrd = tc[0] * tc[0] + tc[1] * tc[1] + tc[2] * tc[2];
const float s_sqrd = k * k * v_sqrd;
if (r_sqrd < lambda_sqrd * s_sqrd)
{
is_visible[j] = 0;
}
}
__host__
void set_constant_var(const float* const c)
{
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_dev, c, sizeof(float) * 3, 0, hipMemcpyHostToDevice));
}
__host__
void call_kernel_func_gpu(const float* const cloud_dev, char* const is_visible_dev, const float lambda_sqrd, const int i, const int numel)
{
const int thread_num = 1024;
const int grid_num = static_cast<int>(::floor(numel / thread_num));
hipLaunchKernelGGL(( kernel_func_gpu) , dim3(grid_num), dim3(thread_num) , 0, 0, cloud_dev, is_visible_dev, lambda_sqrd, i, 0);
if (numel % thread_num != 0)
{
const int offset = grid_num * thread_num;
hipLaunchKernelGGL(( kernel_func_gpu) , dim3(1), dim3(numel % thread_num) , 0, 0, cloud_dev, is_visible_dev, lambda_sqrd, i, offset);
}
//hipDeviceSynchronize();
} | 2fd9bd23cda3f5da83fa06cb538ae5de6af66021.cu | #pragma warning(disable: 4819)
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include "visible_check.cuh"
using Vec3 = float[3];
__constant__ float c_dev[3];
__global__
void kernel_func_gpu(const float* const __restrict__ cloud, char* const __restrict__ is_visible, const float lambda_sqrd, const int i, const int offset)
{
const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + offset;
const float* const p = &cloud[3 * i];
const float* const q = &cloud[3 * j];
if (is_visible[i] == 0) return;
if (j <= i) return;
if (is_visible[j] == 0) return;
Vec3 v;
v[0] = p[0] - c_dev[0];
v[1] = p[1] - c_dev[1];
v[2] = p[2] - c_dev[2];
const float v_sqrd = v[0] * v[0] + v[1] * v[1] + v[2] * v[2];
Vec3 ta;
ta[0] = q[0] - c_dev[0];
ta[1] = q[1] - c_dev[1];
ta[2] = q[2] - c_dev[2];
const float tb = v[0] * ta[0] + v[1] * ta[1] + v[2] * ta[2];
const float k = tb / v_sqrd;
Vec3 tc;
tc[0] = ta[0] - k * v[0];
tc[1] = ta[1] - k * v[1];
tc[2] = ta[2] - k * v[2];
const float r_sqrd = tc[0] * tc[0] + tc[1] * tc[1] + tc[2] * tc[2];
const float s_sqrd = k * k * v_sqrd;
if (r_sqrd < lambda_sqrd * s_sqrd)
{
is_visible[j] = 0;
}
}
__host__
void set_constant_var(const float* const c)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_dev, c, sizeof(float) * 3, 0, cudaMemcpyHostToDevice));
}
__host__
void call_kernel_func_gpu(const float* const cloud_dev, char* const is_visible_dev, const float lambda_sqrd, const int i, const int numel)
{
const int thread_num = 1024;
const int grid_num = static_cast<int>(std::floor(numel / thread_num));
kernel_func_gpu <<< grid_num, thread_num >>>(cloud_dev, is_visible_dev, lambda_sqrd, i, 0);
if (numel % thread_num != 0)
{
const int offset = grid_num * thread_num;
kernel_func_gpu <<< 1, numel % thread_num >>>(cloud_dev, is_visible_dev, lambda_sqrd, i, offset);
}
//cudaThreadSynchronize();
} |
41e150f9a6c7266302cd79ee75e827400468bfe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
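// Maintains two device-side half-precision scratch buffers: publicMemory[0] sized for the
// largest layer input seen so far and publicMemory[1] for the largest layer output. The
// buffers are reallocated only when a layer requests more space than previously seen, so
// repeated f32<->f16 conversions can reuse them across layers.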
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
/*check_error(hipMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), hipMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
__global__ void add_bias_half_kernel(float* output, float* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch * n + filter) * size + offset] += biases[filter];
}
void add_bias_half_gpu(float* output, float* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
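// Half-precision inference path: when the producer delivered f32 (prop->bIn32) the input is
// converted f32->f16 into publicMemory[0], cudnnConvolutionForward then runs with the
// pre-converted f16 weights (see DealWeightBuffer), and the result is converted back to f32
// when the consumer expects f32 (prop->bOut32). Bias addition and activation remain in f32.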
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} | 41e150f9a6c7266302cd79ee75e827400468bfe4.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(cudaMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
/*check_error(cudaMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), cudaMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
__global__ void add_bias_half_kernel(float* output, float* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch * n + filter) * size + offset] += biases[filter];
}
void add_bias_half_gpu(float* output, float* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} |
32c3523f0be87719b5f3dff16e2608d1ea2e8269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "NCV.hpp"
#include "NPP_staging/NPP_staging.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVHaarObjectDetection.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
//for(Ncv32u offset = 1; offset < K_WARP_SIZE; offset <<= 1)
//{
// s_Data[pos] += s_Data[pos - offset];
//}
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
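// Example (illustrative): with K_WARP_SIZE == 32 and idata == 1 on every lane,
//   warpScanInclusive(1, s_Data) -> 1, 2, ..., 32 across the warp
//   warpScanExclusive(1, s_Data) -> 0, 1, ..., 31
// The zero written at pos - K_WARP_SIZE pads the slots below each warp's data region, so the
// unrolled accumulation can read "previous" entries for the low lanes without a bounds check.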
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, hipReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
* Two parts: first contains root nodes, second - nodes that are referred by root nodes.
* Drawback: breaks tree locality (might cause more cache misses
 * Drawback: breaks tree locality (might cause more cache misses)
* Advantage: No need to introduce additional 32-bit field to index root nodes offsets
*/
texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes;
texture<Ncv32u, 1, hipReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32f reduceSpecialization(Ncv32f partialSum)
{
__shared__ volatile Ncv32f reductor[NUM_THREADS_CLASSIFIERPARALLEL];
reductor[threadIdx.x] = partialSum;
__syncthreads();
#if defined CPU_FP_COMPLIANCE
if (!threadIdx.x)
{
Ncv32f sum = 0.0f;
for (int i=0; i<NUM_THREADS_CLASSIFIERPARALLEL; i++)
{
sum += reductor[i];
}
reductor[0] = sum;
}
#else
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 8
if (threadIdx.x < 128)
{
reductor[threadIdx.x] += reductor[threadIdx.x + 128];
}
__syncthreads();
#endif
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 7
if (threadIdx.x < 64)
{
reductor[threadIdx.x] += reductor[threadIdx.x + 64];
}
__syncthreads();
#endif
if (threadIdx.x < 32)
{
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 6
reductor[threadIdx.x] += reductor[threadIdx.x + 32];
#endif
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 5
reductor[threadIdx.x] += reductor[threadIdx.x + 16];
#endif
reductor[threadIdx.x] += reductor[threadIdx.x + 8];
reductor[threadIdx.x] += reductor[threadIdx.x + 4];
reductor[threadIdx.x] += reductor[threadIdx.x + 2];
reductor[threadIdx.x] += reductor[threadIdx.x + 1];
}
#endif
__syncthreads();
return reductor[0];
}
__device__ Ncv32u d_outMaskPosition;
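// Block-wide stream compaction: each thread contributes a 0/1 pass flag, a block scan assigns
// surviving elements contiguous slots in shared memory, and a single atomicAdd on
// d_outMaskPosition reserves the block's output range in the global vector. Atomics are
// required, hence the __CUDA_ARCH__ >= 110 guard inside.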
__inline __device__ void compactBlockWriteOutAnchorParallel(NcvBool threadPassFlag,
Ncv32u threadElem,
Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ >= 110
Ncv32u passMaskElem = threadPassFlag ? 1 : 0;
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
Ncv32u incScan = blockScanInclusive<Ncv32u, NUM_THREADS_ANCHORSPARALLEL>(passMaskElem, shmem);
__syncthreads();
Ncv32u excScan = incScan - passMaskElem;
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
__syncthreads();
if (threadPassFlag)
{
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
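// Template booleans select a specialized kernel at compile time:
//   tbInitMaskPositively      - treat every anchor inside the ROI as initially valid instead of honoring d_inMask;
//   tbCacheTextureIImg        - read the integral image through texIImage instead of plain global loads;
//   tbCacheTextureCascade     - fetch classifier nodes and features through the bound textures;
//   tbReadPixelIndexFromVector - d_inMask is a compacted 1D vector of packed (y << 16 | x) anchors;
//   tbDoAtomicCompaction      - write surviving anchors with the atomic block compaction above.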
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = reduceSpecialization(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
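// The functor structs below capture the launch configuration and kernel arguments so that
// NCVRuntimeTemplateBool::KernelCaller can translate the runtime NcvBool flags into the
// matching compile-time template instantiation (the Loki typelist supplies one bool per
// "dynamic" template parameter) and launch the specialized kernel.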
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
hipLaunchKernelGGL(( initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
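//Illustrative sketch (added, not part of the original source): the *DynTemplate helpers above
//turn run-time booleans into compile-time template parameters. KernelCaller builds a Loki
//typelist from the trailing flags and ends up invoking functor->call() with that typelist, so
//each combination of flags selects a distinct kernel instantiation. A minimal functor with one
//flag, mirroring the pattern used above (all names here are hypothetical):
struct exampleOneFlagFunctor
{
    int flagValue;
    template<class TList>
    void call(TList tl)
    {
        //TypeAt<TList, 0>::Result::value is the boolean passed at run time, now a compile-time constant
        flagValue = Loki::TL::TypeAt<TList, 0>::Result::value;
    }
};
//Dispatch would look like this (1 == number of "dynamic" template parameters):
// exampleOneFlagFunctor functor;
// NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 1, exampleOneFlagFunctor>
//     ::call(&functor, someRuntimeFlag);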
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
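//Note (added, not in the original source): this helper is used below to compute stageMiddleSwitch,
//i.e. the first stage with at least NUM_THREADS_CLASSIFIERPARALLEL root classifiers. Stages before
//that point run in the anchor-parallel kernels (one thread per search window); stages from that
//point on run in the classifier-parallel kernel (one block per surviving window).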
template <class T>
void swap(T &p1, T &p2)
{
T tmp = p1;
p1 = p2;
p2 = tmp;
}
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImage,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(d_integralImage.memType() == d_weights.memType() &&
d_integralImage.memType() == d_pixelMask.memType() &&
d_integralImage.memType() == gpuAllocator.memType() &&
(d_integralImage.memType() == NCVMemoryTypeDevice ||
d_integralImage.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_integralImage.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
d_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
d_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, d_integralImage.width, d_integralImage.height, d_integralImage.pitch);
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch);
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length);
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length);
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_integralImage.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, d_vecPixelMask.length());
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(d_vecPixelMask.length(), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
if (bTexCacheIImg)
{
hipChannelFormatDesc cfdTexIImage;
cfdTexIImage = hipCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, d_integralImage.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * d_integralImage.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
hipChannelFormatDesc cfdTexHaarFeatures;
hipChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = hipCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
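//Worked example (added, not in the original source): with bDoAtomicCompaction == true
//(compactEveryNstage == 7) and stageMiddleSwitch == 18, the loop above produces
//pixParallelStageStops == {0, 7, 14, 18}, so the pixel-parallel phase runs stages
//[0,7), [7,14) and [14,18), compacting the surviving-anchor vector after each chunk.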
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
d_vecPixelMask.length(), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), d_vecPixelMask.length(),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
// 1. Run the first pixel-input pixel-parallel classifier for few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), d_vecPixelMask.length(),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
// 3. Run all left stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
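//Illustrative example (added, not part of the original source): the pixel mask packs each
//surviving anchor as (y << 16) | x, and pixelToRect() scales it back into image coordinates.
//The helper below is hypothetical and only demonstrates the round trip.
static NcvRect32u exampleDecodeAnchor(void)
{
    Ncv32u packed = (12u << 16) | 34u;           //anchor at x=34, y=12, as written by the kernels above
    return pixelToRect(packed, 20u, 20u, 2.0f);  //-> {x=68, y=24, width=40, height=40} for a 20x20 window at scale 2
}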
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
hipStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
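//Worked example (added, not in the original source): for numDetsToCopy == 10,000,000 and
//NUM_GROW_THREADS == 128, grid.x would be 78125 > 65535, so the launch is reshaped to
//grid = (65535, 2) and growDetectionsKernel() recovers the linear block index as
//blockIdx.y * 65535 + blockIdx.x, discarding the tail threads via the numElements guard.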
//==============================================================================
//
// Pipeline file
//
//==============================================================================
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> d_integralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_integralImage.ptr(), d_integralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi, srcIIRoi, scaledIIRoi, searchRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
d_integralImage.ptr(), d_integralImage.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
d_hypothesesIntermediate.length(),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = d_dstRects.length();
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
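//Illustrative sketch (added, not in the original source): the multiscale loop above enumerates
//integer decimation factors by truncating a geometric sequence and skipping duplicates; for
//scaleStep == 1.2f this visits 1, 2, 3, 4, 5, 6, 7, 8, 10, ... until the search ROI vanishes.
//A minimal host-side replica of that enumeration (maxScale stands in for the ROI-based exit):
static void exampleEnumerateScales(Ncv32f scaleStep, Ncv32u maxScale, std::vector<Ncv32u> &outScales)
{
    //scaleStep must be > 1.0f, as asserted by ncvDetectObjectsMultiScale_device() above
    Ncv32u lastCheckedScale = 0;
    for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
    {
        Ncv32u scale = (Ncv32u)scaleIter;
        if (scale == lastCheckedScale)
        {
            continue;
        }
        lastCheckedScale = scale;
        if (scale > maxScale)
        {
            break;
        }
        outScales.push_back(scale);
    }
}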
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
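//Note (added, not in the original source): the sort/count step above relies on
//OBJDET_MASK_ELEMENT_INVALID_32U comparing greater than any packed (y << 16) | x value, so an
//ascending sort moves all surviving anchors to the front and numDetections is simply the index
//of the first invalid element.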
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
NCVStatus loadFromXML(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures);
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
static NCVStatus loadFromNVBIN(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
ncvAssertReturn(1 == fread(&fileVersion, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
ncvAssertReturn(1 == fread(&fsize, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
ncvAssertReturn(1 == fread(&fdata[0], fsize, 1, fp), NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
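//Layout summary (added, not in the original source) of the NVBIN format read above and written
//by ncvHaarStoreNVBIN_host() below (offsets in bytes):
//  0        Ncv32u      file version (NVBIN_HAAR_VERSION)
//  4        Ncv32u      total file size
//  8..15    reserved    (header occupies NVBIN_HAAR_SIZERESERVED == 16 bytes)
//  16       Ncv32u      NumStages
//  20       Ncv32u      NumClassifierRootNodes
//  24       Ncv32u      NumClassifierTotalNodes
//  28       Ncv32u      NumFeatures
//  32       NcvSize32u  ClassifierSize
//  then     NcvBool     bNeedsTiltedII, bHasStumpsOnly
//  then     HaarStage64[NumStages], HaarClassifierNode128[NumClassifierTotalNodes],
//           HaarFeature64[NumFeatures]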
NCVStatus ncvHaarGetClassifierSize(const std::string &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
NCVStatus ncvStat;
std::string fext = filename.substr(filename.find_last_of(".") + 1);
std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
ncvAssertReturn(1 == fread(&fileVersion, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
ncvAssertReturn(1 == fread(&numStages, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&tmp, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&numNodes, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&numFeatures, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
NCVStatus ncvHaarLoadFromFile_host(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
std::string fext = filename.substr(filename.find_last_of(".") + 1);
std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
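//Usage sketch (added, not in the original source): typical call sequence for loading a cascade.
//It assumes a host-pinned INCVMemAllocator has already been constructed elsewhere (as required by
//the residence checks above); the vectors are local here only to illustrate the order of the calls.
static NCVStatus exampleLoadCascade(const std::string &filename,
                                    INCVMemAllocator &pinnedHostAllocator,
                                    HaarClassifierCascadeDescriptor &haar)
{
    Ncv32u numStages = 0, numNodes = 0, numFeatures = 0;
    NCVStatus ncvStat = ncvHaarGetClassifierSize(filename, numStages, numNodes, numFeatures);
    ncvAssertReturnNcvStat(ncvStat);
    NCVVectorAlloc<HaarStage64> h_stages(pinnedHostAllocator, numStages);
    NCVVectorAlloc<HaarClassifierNode128> h_nodes(pinnedHostAllocator, numNodes);
    NCVVectorAlloc<HaarFeature64> h_features(pinnedHostAllocator, numFeatures);
    ncvAssertReturn(h_stages.isMemAllocated() && h_nodes.isMemAllocated() &&
                    h_features.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
    return ncvHaarLoadFromFile_host(filename, haar, h_stages, h_nodes, h_features);
}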
NCVStatus ncvHaarStoreNVBIN_host(const std::string &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
| 32c3523f0be87719b5f3dff16e2608d1ea2e8269.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "NCV.hpp"
#include "NPP_staging/NPP_staging.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVHaarObjectDetection.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
template <class T>
inline __device__ T warpScanInclusive(T idata, volatile T *s_Data)
{
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
//for(Ncv32u offset = 1; offset < K_WARP_SIZE; offset <<= 1)
//{
// s_Data[pos] += s_Data[pos - offset];
//}
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
template <class T>
inline __device__ T warpScanExclusive(T idata, volatile T *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <class T, Ncv32u tiNumScanThreads>
inline __device__ T blockScanInclusive(T idata, volatile T *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
T warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
T val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
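//Minimal usage sketch (added, not part of the original source): an inclusive block-wide scan of
//one value per thread. The shared buffer must hold 2 * blockDim.x elements because
//warpScanInclusive() zero-pads the lower half. Assumes a launch with exactly 64 threads per
//block; the kernel name and buffers are hypothetical.
__global__ void exampleBlockScanKernel(const Ncv32u *d_in, Ncv32u *d_out)
{
    __shared__ Ncv32u s_scanBuf[2 * 64];
    Ncv32u idx = blockIdx.x * 64 + threadIdx.x;
    d_out[idx] = blockScanInclusive<Ncv32u, 64>(d_in[idx], s_scanBuf);
}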
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, cudaReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
* Two parts: first contains root nodes, second - nodes that are referred by root nodes.
* Drawback: breaks tree locality (might cause more cache misses)
* Advantage: No need to introduce additional 32-bit field to index root nodes offsets
*/
texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes;
texture<Ncv32u, 1, cudaReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32f reduceSpecialization(Ncv32f partialSum)
{
__shared__ volatile Ncv32f reductor[NUM_THREADS_CLASSIFIERPARALLEL];
reductor[threadIdx.x] = partialSum;
__syncthreads();
#if defined CPU_FP_COMPLIANCE
if (!threadIdx.x)
{
Ncv32f sum = 0.0f;
for (int i=0; i<NUM_THREADS_CLASSIFIERPARALLEL; i++)
{
sum += reductor[i];
}
reductor[0] = sum;
}
#else
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 8
if (threadIdx.x < 128)
{
reductor[threadIdx.x] += reductor[threadIdx.x + 128];
}
__syncthreads();
#endif
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 7
if (threadIdx.x < 64)
{
reductor[threadIdx.x] += reductor[threadIdx.x + 64];
}
__syncthreads();
#endif
if (threadIdx.x < 32)
{
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 6
reductor[threadIdx.x] += reductor[threadIdx.x + 32];
#endif
#if NUM_THREADS_CLASSIFIERPARALLEL_LOG2 >= 5
reductor[threadIdx.x] += reductor[threadIdx.x + 16];
#endif
reductor[threadIdx.x] += reductor[threadIdx.x + 8];
reductor[threadIdx.x] += reductor[threadIdx.x + 4];
reductor[threadIdx.x] += reductor[threadIdx.x + 2];
reductor[threadIdx.x] += reductor[threadIdx.x + 1];
}
#endif
__syncthreads();
return reductor[0];
}
__device__ Ncv32u d_outMaskPosition;
__inline __device__ void compactBlockWriteOutAnchorParallel(NcvBool threadPassFlag,
Ncv32u threadElem,
Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ >= 110
Ncv32u passMaskElem = threadPassFlag ? 1 : 0;
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
Ncv32u incScan = blockScanInclusive<Ncv32u, NUM_THREADS_ANCHORSPARALLEL>(passMaskElem, shmem);
__syncthreads();
Ncv32u excScan = incScan - passMaskElem;
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
__syncthreads();
if (threadPassFlag)
{
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
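//Worked example (added, not in the original source): if threads 3, 10 and 17 of a 64-thread block
//pass, the inclusive scan yields a block total of 3, one atomicAdd on d_outMaskPosition reserves
//3 consecutive slots in the output vector, and the three surviving mask values are written there
//contiguously -- a per-block stream compaction without a separate NPP compaction pass.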
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread)
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || (tbDoAtomicCompaction && !bInactiveThread))
{
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
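// Classifier-parallel cascade kernel: one block processes one surviving anchor,
// with threads splitting the root nodes of each stage in chunks of
// NUM_THREADS_CLASSIFIERPARALLEL. Per-thread partial sums are combined with
// reduceSpecialization before the stage threshold test; this path is used for
// the later stages, which hold enough classifiers to keep a whole block busy.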
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = reduceSpecialization(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
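// Seeds the pixel mask vector: every anchor on the step-subsampled grid is
// encoded as (y << 16) | x, optionally filtered by an existing input mask
// (tbMaskByInmask); everything else becomes OBJDET_MASK_ELEMENT_INVALID_32U,
// or is dropped immediately when atomic compaction is enabled.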
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
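// The functor/DynTemplate pairs below bridge runtime flags to compile-time
// template parameters: kernel arguments are captured in a functor, and
// NCVRuntimeTemplateBool::KernelCaller walks a Loki typelist to instantiate and
// launch the kernel specialization matching the given boolean combination.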
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
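// Returns the index of the first cascade stage containing at least N classifier
// root nodes; used to pick the switch point between the anchor-parallel and
// classifier-parallel kernels.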
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
template <class T>
void swap(T &p1, T &p2)
{
T tmp = p1;
p1 = p2;
p2 = tmp;
}
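// Runs the full cascade on the device for one scale. The pipeline is:
// (1) initialize/compact the anchor mask, (2) run the anchor-parallel kernel
// over groups of early stages with periodic compaction of survivors, and
// (3) finish the remaining stages with the classifier-parallel kernel.
// Compaction uses device atomics on sm_13 and newer GPUs and NPP-style stream
// compaction otherwise; numDetections returns the count of surviving anchors.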
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImage,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(d_integralImage.memType() == d_weights.memType() &&
d_integralImage.memType() == d_pixelMask.memType() &&
d_integralImage.memType() == gpuAllocator.memType() &&
(d_integralImage.memType() == NCVMemoryTypeDevice ||
d_integralImage.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_integralImage.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
d_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
d_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, d_integralImage.width(), d_integralImage.height(), d_integralImage.pitch());
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width(), d_weights.height(), d_weights.pitch());
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width(), d_pixelMask.height(), d_pixelMask.pitch());
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length());
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length());
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width(), d_pixelMask.height(), d_pixelMask.pitch());
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_integralImage.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr()[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr()[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr()[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, d_vecPixelMask.length());
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(d_vecPixelMask.length(), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
if (bTexCacheIImg)
{
cudaChannelFormatDesc cfdTexIImage;
cfdTexIImage = cudaCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, d_integralImage.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * d_integralImage.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
cudaChannelFormatDesc cfdTexHaarFeatures;
cudaChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
d_vecPixelMask.length(), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), d_vecPixelMask.length(),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
// 1. Run the first pixel-input pixel-parallel classifier for few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), d_vecPixelMask.length(),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
// 3. Run all left stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
d_integralImage.ptr(), d_integralImage.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr()[i], h_pixelMask_d.ptr()[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
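// Decodes a packed mask element ((y << 16) | x) into a detection rectangle,
// scaling both the anchor position and the classifier window size by curScale.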
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
cudaStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
//==============================================================================
//
// Pipeline file
//
//==============================================================================
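// Multi-scale detection pipeline: builds the integral and squared-integral
// images once, then for each scale decimates them, computes the per-anchor
// window standard deviation (nppiStRectStdDev), applies the cascade, and grows
// surviving mask elements into hypothesis rectangles. Grouping of the raw
// hypotheses is still done on the host (see the TODO below);
// NCVPipeObjDet_FindLargestObject traverses scales from largest to smallest and
// stops at the first scale that yields a grouped detection.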
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> d_integralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_integralImage.ptr(), d_integralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi, srcIIRoi, scaledIIRoi, searchRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
d_integralImage.ptr(), d_integralImage.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
d_hypothesesIntermediate.length(),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = d_dstRects.length();
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
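// Host (CPU) reference implementation of the cascade with the same semantics as
// the device path; it provides the gold result for the _SELF_TEST_ comparison
// above. Surviving anchors keep their packed (y << 16) | x value, everything
// else is set to OBJDET_MASK_ELEMENT_INVALID_32U; the mask is then sorted and
// counted up to the first invalid element.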
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
NCVStatus loadFromXML(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures);
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
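// NVBIN cascade reader. The expected layout (as produced by
// ncvHaarStoreNVBIN_host below) is: a version word and the total file size,
// padding up to NVBIN_HAAR_SIZERESERVED bytes, the descriptor fields
// (stage/node/feature counts, classifier size, tilted-II and stumps-only
// flags), then the raw HaarStage64, HaarClassifierNode128 and HaarFeature64
// arrays.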
static NCVStatus loadFromNVBIN(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
ncvAssertReturn(1 == fread(&fileVersion, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
ncvAssertReturn(1 == fread(&fsize, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
ncvAssertReturn(1 == fread(&fdata[0], fsize, 1, fp), NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
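// Queries stage/node/feature counts without loading the whole cascade: for
// .nvbin files the counts are read directly from the header, for .xml the
// cascade is parsed via loadFromXML.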
NCVStatus ncvHaarGetClassifierSize(const std::string &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
NCVStatus ncvStat;
std::string fext = filename.substr(filename.find_last_of(".") + 1);
std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
ncvAssertReturn(1 == fread(&fileVersion, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
ncvAssertReturn(1 == fread(&numStages, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&tmp, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&numNodes, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
ncvAssertReturn(1 == fread(&numFeatures, sizeof(Ncv32u), 1, fp), NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
NCVStatus ncvHaarLoadFromFile_host(const std::string &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
std::string fext = filename.substr(filename.find_last_of(".") + 1);
std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
NCVStatus ncvHaarStoreNVBIN_host(const std::string &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
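//the extra 1024 zero-initialized bytes leave room for the reserved header and
//the descriptor fields written below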
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
|
53e77a7d5b7b57df8e12676f695ed4438110a579.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_volume_to_constant.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
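// blocks_ lists the thread-block shapes and matrices_ the problem sizes to sweep;
// each combination is timed over 1000 kernel launches in main().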
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
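// Round the problem size up to the next multiple of the block shape so the
// grid dimensions below divide evenly.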
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((set_volume_to_constant), dim3(gridBlock), dim3(threadBlock), 0, 0, out, value);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((set_volume_to_constant), dim3(gridBlock), dim3(threadBlock), 0, 0, out, value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((set_volume_to_constant), dim3(gridBlock), dim3(threadBlock), 0, 0, out, value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 53e77a7d5b7b57df8e12676f695ed4438110a579.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_volume_to_constant.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_volume_to_constant<<<gridBlock,threadBlock>>>(out,value);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_volume_to_constant<<<gridBlock,threadBlock>>>(out,value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_volume_to_constant<<<gridBlock,threadBlock>>>(out,value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d1f619d7151409f7333225a5b12d326e2aa466f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define BLOCKSIZE 512
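// prefix: within each block, label every character with its distance to the
// most recent space (a space gets 0; a character with no space earlier in its
// block ends up with localIdx + 1).
// combine: for those unresolved characters, add the count carried in from the
// last element of the previous block.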
__global__ void prefix(const char *cuStr, int *cuPos, int strLen){
int localIdx = threadIdx.x;
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx >= strLen) return;
__shared__ int buf[BLOCKSIZE];
buf[localIdx] = cuStr[globalIdx] == ' ' ? localIdx : -1;
__syncthreads();
int cnt = 0;
while(buf[localIdx] < 0 && localIdx > cnt){
cnt++;
buf[localIdx] = buf[localIdx-cnt];
}
cuPos[globalIdx] = localIdx - buf[localIdx];
}
__global__ void combine(int *cuPos, int strLen){
int localIdx = threadIdx.x;
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx >= strLen) return;
if(blockIdx.x > 0 && cuPos[globalIdx] == localIdx + 1)
cuPos[globalIdx] += cuPos[blockIdx.x * blockDim.x - 1];
}
void labeling(const char *cuStr, int *cuPos, int strLen){
const int GRIDSIZE = (strLen+BLOCKSIZE) / BLOCKSIZE;
hipLaunchKernelGGL(( prefix) , dim3(GRIDSIZE), dim3(BLOCKSIZE) , 0, 0, cuStr, cuPos, strLen);
hipLaunchKernelGGL(( combine) , dim3(GRIDSIZE), dim3(BLOCKSIZE) , 0, 0, cuPos, strLen);
} | d1f619d7151409f7333225a5b12d326e2aa466f9.cu | #include <stdio.h>
#define BLOCKSIZE 512
__global__ void prefix(const char *cuStr, int *cuPos, int strLen){
int localIdx = threadIdx.x;
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx >= strLen) return;
__shared__ int buf[BLOCKSIZE];
buf[localIdx] = cuStr[globalIdx] == ' ' ? localIdx : -1;
__syncthreads();
int cnt = 0;
while(buf[localIdx] < 0 && localIdx > cnt){
cnt++;
buf[localIdx] = buf[localIdx-cnt];
}
cuPos[globalIdx] = localIdx - buf[localIdx];
}
__global__ void combine(int *cuPos, int strLen){
int localIdx = threadIdx.x;
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx >= strLen) return;
if(blockIdx.x > 0 && cuPos[globalIdx] == localIdx + 1)
cuPos[globalIdx] += cuPos[blockIdx.x * blockDim.x - 1];
}
void labeling(const char *cuStr, int *cuPos, int strLen){
const int GRIDSIZE = (strLen+BLOCKSIZE) / BLOCKSIZE;
prefix <<< GRIDSIZE, BLOCKSIZE >>> (cuStr, cuPos, strLen);
combine <<< GRIDSIZE, BLOCKSIZE >>> (cuPos, strLen);
} |
492387ebbf90ea5334b2041b4d472df1d9593db8.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time() is used to seed rand() in RandomInit
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT multiplication work (the loop body below is integer multiplies only)
if((i%2)!=0){
for(unsigned k=0; k<iterations;k++) {
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
srand((unsigned)time(0)); // seed once; reseeding every iteration repeats the sequence
for (int i = 0; i < n; ++i){
data[i] = rand(); // rand() / RAND_MAX is integer division and would almost always yield 0
}
} | 492387ebbf90ea5334b2041b4d472df1d9593db8.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time() is used to seed rand() in RandomInit
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT multiplication work (the loop body below is integer multiplies only)
if((i%2)!=0){
for(unsigned k=0; k<iterations;k++) {
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
srand((unsigned)time(0)); // seed once; reseeding every iteration repeats the sequence
for (int i = 0; i < n; ++i){
data[i] = rand(); // rand() / RAND_MAX is integer division and would almost always yield 0
}
} |
a6c705fd15d07743be456f791f953ccc4a30ce2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "rgb2yuv.cuh"
#include <stdint.h>
#include <hip/hip_runtime.h>
#define CHECK(res) { if(res != hipSuccess){printf("Error %s:%d , ", __FILE__,__LINE__); \
printf("code : %d , reason : %s \n", res,hipGetErrorString(res));exit(-1);}}
#define CUDA(x) cudaCheckError((x), #x, __FILE__, __LINE__)
#define CUDA_FAILED(x) (CUDA(x) != hipSuccess)
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
#define LOG_CUDA "[cuda] "
__constant__ uint32_t constAlpha;
__constant__ float constHueColorSpaceMat[9];
/**
* iDivUp
*/
inline __device__ __host__ int iDivUp( int a, int b ) { return (a % b != 0) ? (a / b + 1) : (a / b); }
inline hipError_t cudaCheckError(hipError_t retval, const char* txt, const char* file, int line )
{
//int activeDevice = -1;
//hipGetDevice(&activeDevice);
//Log("[cuda] device %i - %s\n", activeDevice, txt);
printf(LOG_CUDA "%s\n", txt);
if( retval != hipSuccess )
{
printf(LOG_CUDA " %s (error %u) (hex 0x%02X)\n", hipGetErrorString(retval), retval, retval);
printf(LOG_CUDA " %s:%i\n", file, line);
}
return retval;
}
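// The RGB->YUV helpers below use integer-scaled BT.601-style coefficients
// (multiplied by 100): Y = 0.30R + 0.59G + 0.11B,
// U = -0.17R - 0.33G + 0.50B + 128, V = 0.50R - 0.42G - 0.08B + 128.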
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
printf("[%d,%d]\n",x,y);
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
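// RGB_to_NV12: same 2x2 chroma downsampling as above, but chroma is stored in a
// single interleaved plane after Y; note that with formatNV12 == true the bytes
// are written V then U at each pair (NV21 byte order), otherwise U then V.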
template <typename T, bool formatNV12>
__global__ void RGB_to_NV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
printf("[%d,%d]\n",x,y);
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
u_plane = y_plane + planeSize;
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
if(formatNV12){
u_plane[uvIndex*2+1] = u_val;
u_plane[uvIndex*2] = v_val;
}else{
u_plane[uvIndex*2] = u_val;
u_plane[uvIndex*2+1] = v_val;
}
}
__device__ void YUV2RGB(uint32_t *yuvi, float *red, float *green, float *blue)
{
// Prepare for hue adjustment
/*
float luma, chromaCb, chromaCr;
luma = (float)yuvi[0];
chromaCb = (float)((int)yuvi[1] - 512.0f);
chromaCr = (float)((int)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = MUL(luma, constHueColorSpaceMat[0]) +
MUL(chromaCb, constHueColorSpaceMat[1]) +
MUL(chromaCr, constHueColorSpaceMat[2]);
*green= MUL(luma, constHueColorSpaceMat[3]) +
MUL(chromaCb, constHueColorSpaceMat[4]) +
MUL(chromaCr, constHueColorSpaceMat[5]);
*blue = MUL(luma, constHueColorSpaceMat[6]) +
MUL(chromaCb, constHueColorSpaceMat[7]) +
MUL(chromaCr, constHueColorSpaceMat[8]);*/
const float luma = float(yuvi[0]);
const float u = float(yuvi[1]) - 512.0f;
const float v = float(yuvi[2]) - 512.0f;
/*R = Y + 1.140V
G = Y - 0.395U - 0.581V
B = Y + 2.032U*/
/**green = luma + 1.140f * v;
*blue = luma - 0.395f * u - 0.581f * v;
*red = luma + 2.032f * u;*/
*red = luma + 1.140f * v;
*green = luma - 0.395f * u - 0.581f * v;
*blue = luma + 2.032f * u;
}
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red) << 24) | (((uint32_t)green) << 16) | (((uint32_t)blue) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
uint32_t intRed = (uint32_t)red;
intRed = intRed >> 2;
// Convert to 8 bit unsigned integers per color component
// ARGBpixel = ((((uint32_t)red >> 2) << 24) |
// (((uint32_t)green >> 2) << 16) |
// (((uint32_t)blue >> 2) << 8) | (uint32_t)alpha);
// ARGBpixel = ((((uint32_t)red >> 2) << 24) |(((uint32_t)green >> 2) << 16) |(((uint32_t)blue >> 2) << 8) );
// printf("[%d,%d] int red %d ,int green %d,blue %d",((uint32_t)red >> 2),((uint32_t)green >> 2),((uint32_t)blue >> 2),ARGBpixel);
uint8_t * pRed =(uint8_t *) &ARGBpixel;
*pRed=((uint32_t)red >> 2);
*(pRed+1)=((uint32_t)green >> 2);
*(pRed+2)=((uint32_t)blue >> 2);
printf("red is %d,green is %d,blue is %d,postion0-3,%d,%d,%d,%d\n",((uint32_t)red >> 2),((uint32_t)green >> 2),((uint32_t)blue >> 2),*(pRed),*(pRed+1),*(pRed+2),*(pRed+3));
return ARGBpixel;
}
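// NV12ToARGB: each thread converts two horizontally adjacent pixels. Luma and
// the (optionally vertically interpolated) chroma are packed as three 10-bit
// fields into yuv101010Pel before the YUV->RGB conversion and byte packing above.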
__global__ void NV12ToARGB(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
uint32_t yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
uint8_t * pRead =(uint8_t*) &dstImage[y * dstImagePitch + x ];
// if(x%4 ==0&&y%4==0){
// printf("[%d,%d] red is %d,green is %d \n",x,y,pRead[0],pRead[1]);
// }
}
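// Host-side wrappers: allocate device buffers, copy the input over, launch the
// kernel, copy the result back and free. The intermediate hipError_t values are
// currently assigned but not checked; only the final hipDeviceSynchronize goes
// through CHECK.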
void rgb2yuv(const char *src,uint8_t *dest,int width,int height){
printf("rgb2yuv width %d,height %d\n",width,height);
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
uchar3 * pChar3 = (uchar3 *) src;
// Allocate the device input vector B
uchar3 *nvPChar2 = NULL;
hipError_t err = hipMalloc((void **)&nvPChar2, width*height*sizeof(uchar3));
uint8_t *nvYuv = NULL;
err = hipMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RGB_to_YV12<uchar3, true>), dim3(grid), dim3(block), 0, 0, nvPChar2, width,nvYuv, width, width, height);
err = hipMemcpy(dest, nvYuv, width*height*3/2, hipMemcpyDeviceToHost);
err = hipFree(nvPChar2);
err = hipFree(nvYuv);
CHECK(hipDeviceSynchronize());
}
void rgb2NV12(const char *src,uint8_t *dest,int width,int height){
printf("rgb2yuv width %d,height %d\n",width,height);
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
uchar3 * pChar3 = (uchar3 *) src;
// Allocate the device input vector B
uchar3 *nvPChar2 = NULL;
hipError_t err = hipMalloc((void **)&nvPChar2, width*height*sizeof(uchar3));
uint8_t *nvYuv = NULL;
err = hipMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RGB_to_NV12<uchar3, true>), dim3(grid), dim3(block), 0, 0, nvPChar2, width,nvYuv, width, width, height);
err = hipMemcpy(dest, nvYuv, width*height*3/2, hipMemcpyDeviceToHost);
err = hipFree(nvPChar2);
err = hipFree(nvYuv);
CHECK(hipDeviceSynchronize());
}
bool nv12ColorspaceSetup = false;
// cudaNV12SetupColorspace
hipError_t cudaNV12SetupColorspace( float hue = 0.0f )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(hipMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return hipErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff);
if( CUDA_FAILED(hipMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return hipErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return hipSuccess;
}
// cudaNV12ToARGB32
void cudaNV12ToRGBA( char* srcDev, size_t srcPitch, char* destDev, size_t destPitch, size_t width, size_t height )
{
// if( !srcDev || !destDev )
// return hipErrorInvalidDevicePointer;
// if( srcPitch == 0 || destPitch == 0 || width == 0 || height == 0 )
// return hipErrorInvalidValue;
if( !nv12ColorspaceSetup )
cudaNV12SetupColorspace(0.0f);
const dim3 blockDim(32,16,1);
const dim3 gridDim((width+(2*blockDim.x-1))/(2*blockDim.x), (height+(blockDim.y-1))/blockDim.y, 1);
char *nvRGBA = NULL;
hipError_t err = hipMalloc((void **)&nvRGBA, width*height*sizeof(char)*4);
char *nvNV12 = NULL;
err = hipMalloc((void **)&nvNV12, width*height*sizeof(char)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(nvNV12, srcDev, width*height*sizeof(char)*3/2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( NV12ToARGB), dim3(gridDim), dim3(blockDim), 0, 0, (uint32_t*)nvNV12, srcPitch, (uint32_t*)nvRGBA, destPitch, width, height );
err = hipMemcpy(destDev, nvRGBA, width*height*4, hipMemcpyDeviceToHost);
err = hipFree(nvRGBA);
err = hipFree(nvNV12);
CHECK(hipDeviceSynchronize());
} | a6c705fd15d07743be456f791f953ccc4a30ce2a.cu | #include "rgb2yuv.cuh"
#include <stdint.h>
#include <cuda_runtime.h>
#define CHECK(res) { if(res != cudaSuccess){printf("Error :%s:%d , ", __FILE__,__LINE__); \
printf("code : %d , reason : %s \n", res,cudaGetErrorString(res));exit(-1);}}
#define CUDA(x) cudaCheckError((x), #x, __FILE__, __LINE__)
#define CUDA_FAILED(x) (CUDA(x) != cudaSuccess)
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
#define LOG_CUDA "[cuda] "
__constant__ uint32_t constAlpha;
__constant__ float constHueColorSpaceMat[9];
/**
* iDivUp
*/
inline __device__ __host__ int iDivUp( int a, int b ) { return (a % b != 0) ? (a / b + 1) : (a / b); }
inline cudaError_t cudaCheckError(cudaError_t retval, const char* txt, const char* file, int line )
{
//int activeDevice = -1;
//cudaGetDevice(&activeDevice);
//Log("[cuda] device %i - %s\n", activeDevice, txt);
printf(LOG_CUDA "%s\n", txt);
if( retval != cudaSuccess )
{
printf(LOG_CUDA " %s (error %u) (hex 0x%02X)\n", cudaGetErrorString(retval), retval, retval);
printf(LOG_CUDA " %s:%i\n", file, line);
}
return retval;
}
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
printf("[%d,%d]\n",x,y);
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template <typename T, bool formatNV12>
__global__ void RGB_to_NV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
printf("[%d,%d]\n",x,y);
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
u_plane = y_plane + planeSize;
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
if(formatNV12){
u_plane[uvIndex*2+1] = u_val;
u_plane[uvIndex*2] = v_val;
}else{
u_plane[uvIndex*2] = u_val;
u_plane[uvIndex*2+1] = v_val;
}
}
__device__ void YUV2RGB(uint32_t *yuvi, float *red, float *green, float *blue)
{
// Prepare for hue adjustment
/*
float luma, chromaCb, chromaCr;
luma = (float)yuvi[0];
chromaCb = (float)((int)yuvi[1] - 512.0f);
chromaCr = (float)((int)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = MUL(luma, constHueColorSpaceMat[0]) +
MUL(chromaCb, constHueColorSpaceMat[1]) +
MUL(chromaCr, constHueColorSpaceMat[2]);
*green= MUL(luma, constHueColorSpaceMat[3]) +
MUL(chromaCb, constHueColorSpaceMat[4]) +
MUL(chromaCr, constHueColorSpaceMat[5]);
*blue = MUL(luma, constHueColorSpaceMat[6]) +
MUL(chromaCb, constHueColorSpaceMat[7]) +
MUL(chromaCr, constHueColorSpaceMat[8]);*/
const float luma = float(yuvi[0]);
const float u = float(yuvi[1]) - 512.0f;
const float v = float(yuvi[2]) - 512.0f;
/*R = Y + 1.140V
G = Y - 0.395U - 0.581V
B = Y + 2.032U*/
/**green = luma + 1.140f * v;
*blue = luma - 0.395f * u - 0.581f * v;
*red = luma + 2.032f * u;*/
*red = luma + 1.140f * v;
*green = luma - 0.395f * u - 0.581f * v;
*blue = luma + 2.032f * u;
}
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red) << 24) | (((uint32_t)green) << 16) | (((uint32_t)blue) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
uint32_t intRed = (uint32_t)red;
intRed = intRed >> 2;
// Convert to 8 bit unsigned integers per color component
// ARGBpixel = ((((uint32_t)red >> 2) << 24) |
// (((uint32_t)green >> 2) << 16) |
// (((uint32_t)blue >> 2) << 8) | (uint32_t)alpha);
// ARGBpixel = ((((uint32_t)red >> 2) << 24) |(((uint32_t)green >> 2) << 16) |(((uint32_t)blue >> 2) << 8) );
// printf("[%d,%d] int red %d ,int green %d,blue %d",((uint32_t)red >> 2),((uint32_t)green >> 2),((uint32_t)blue >> 2),ARGBpixel);
uint8_t * pRed =(uint8_t *) &ARGBpixel;
*pRed=((uint32_t)red >> 2);
*(pRed+1)=((uint32_t)green >> 2);
*(pRed+2)=((uint32_t)blue >> 2);
printf("red is %d,green is %d,blue is %d,postion0-3,%d,%d,%d,%d\n",((uint32_t)red >> 2),((uint32_t)green >> 2),((uint32_t)blue >> 2),*(pRed),*(pRed+1),*(pRed+2),*(pRed+3));
return ARGBpixel;
}
__global__ void NV12ToARGB(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this steps performs the color conversion
uint32_t yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
uint8_t * pRead =(uint8_t*) &dstImage[y * dstImagePitch + x ];
// if(x%4 ==0&&y%4==0){
// printf("[%d,%d] red is %d,green is %d \n",x,y,pRead[0],pRead[1]);
// }
}
void rgb2yuv(const char *src,uint8_t *dest,int width,int height){
printf("rgb2yuv width %d,height %d\n",width,height);
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
uchar3 * pChar3 = (uchar3 *) src;
// Allocate the device input vector B
uchar3 *nvPChar2 = NULL;
cudaError_t err = cudaMalloc((void **)&nvPChar2, width*height*sizeof(uchar3));
uint8_t *nvYuv = NULL;
err = cudaMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), cudaMemcpyHostToDevice);
RGB_to_YV12<uchar3, true><<<grid, block>>>(nvPChar2, width,nvYuv, width, width, height);
err = cudaMemcpy(dest, nvYuv, width*height*3/2, cudaMemcpyDeviceToHost);
err = cudaFree(nvPChar2);
err = cudaFree(nvYuv);
CHECK(cudaDeviceSynchronize());
}
void rgb2NV12(const char *src,uint8_t *dest,int width,int height){
printf("rgb2yuv width %d,height %d\n",width,height);
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
uchar3 * pChar3 = (uchar3 *) src;
// Allocate the device input vector B
uchar3 *nvPChar2 = NULL;
cudaError_t err = cudaMalloc((void **)&nvPChar2, width*height*sizeof(uchar3));
uint8_t *nvYuv = NULL;
err = cudaMalloc((void **)&nvYuv, width*height*sizeof(uint8_t)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(nvPChar2, pChar3, width*height*sizeof(uchar3), cudaMemcpyHostToDevice);
RGB_to_NV12<uchar3, true><<<grid, block>>>(nvPChar2, width,nvYuv, width, width, height);
err = cudaMemcpy(dest, nvYuv, width*height*3/2, cudaMemcpyDeviceToHost);
err = cudaFree(nvPChar2);
err = cudaFree(nvYuv);
CHECK(cudaDeviceSynchronize());
}
bool nv12ColorspaceSetup = false;
// cudaNV12SetupColorspace
cudaError_t cudaNV12SetupColorspace( float hue = 0.0f )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return cudaErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff);
if( CUDA_FAILED(cudaMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return cudaErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return cudaSuccess;
}
// cudaNV12ToARGB32
void cudaNV12ToRGBA( char* srcDev, size_t srcPitch, char* destDev, size_t destPitch, size_t width, size_t height )
{
// if( !srcDev || !destDev )
// return cudaErrorInvalidDevicePointer;
// if( srcPitch == 0 || destPitch == 0 || width == 0 || height == 0 )
// return cudaErrorInvalidValue;
if( !nv12ColorspaceSetup )
cudaNV12SetupColorspace(0.0f);
const dim3 blockDim(32,16,1);
const dim3 gridDim((width+(2*blockDim.x-1))/(2*blockDim.x), (height+(blockDim.y-1))/blockDim.y, 1);
char *nvRGBA = NULL;
cudaError_t err = cudaMalloc((void **)&nvRGBA, width*height*sizeof(char)*4);
char *nvNV12 = NULL;
err = cudaMalloc((void **)&nvNV12, width*height*sizeof(char)*3/2);
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(nvNV12, srcDev, width*height*sizeof(char)*3/2, cudaMemcpyHostToDevice);
NV12ToARGB<<<gridDim, blockDim>>>( (uint32_t*)nvNV12, srcPitch, (uint32_t*)nvRGBA, destPitch, width, height );
err = cudaMemcpy(destDev, nvRGBA, width*height*4, cudaMemcpyDeviceToHost);
err = cudaFree(nvRGBA);
err = cudaFree(nvNV12);
CHECK(cudaDeviceSynchronize());
} |
25ac2e350bf802be4929970244e090161207f5be.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
#include "scan_common.h"
#include <hip/hip_runtime.h>
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 256
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(0)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1){
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE
inline __device__ uint warpScanInclusive(uint idata, volatile uint *s_Data, uint size) {
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1) {
s_Data[pos] += s_Data[pos - offset];
//printf("%d; s_Data[%d] += s_Data[%d - %d = %d]; %d += %d\n", threadIdx.x, pos, pos, offset, pos - offset, s_Data[pos], s_Data[pos - offset]);
}
return s_Data[pos];
}
inline __device__ uint warpScanExclusive(uint idata, volatile uint *s_Data, uint size){
return warpScanInclusive(idata, s_Data, size) - idata;
}
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
uint warpResult = warpScanInclusive(idata, s_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) ) {
printf("%d; s_Data[%d]: %d XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n", threadIdx.x, threadIdx.x >> LOG2_WARP_SIZE, warpResult);
s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (THREADBLOCK_SIZE / WARP_SIZE) ) {
//grab top warp elements
uint val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
printf("calling warpScanExclusive(%d, s_Data, %d)\n", val, size >> LOG2_WARP_SIZE);
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> LOG2_WARP_SIZE];
} else {
return warpScanInclusive(idata, s_Data, size);
}
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#endif
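//scan4Inclusive: each thread first scans its own uint4 serially, then a shared
//memory exclusive scan of the per-thread totals (idata4.w) supplies the offset
//added back to all four lanes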
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size){
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
printf("calling scan1Exclusive(%d, s_Data, %d / 4 = %d)\n", idata4.w, size, size / 4);
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size) {
printf("calling scan4Inclusive( (%d, %d, %d, %d), s_Data, %d)\n", idata4.x, idata4.y, idata4.z, idata4.w, size);
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
printf("scanExclusiveShared !!!!!!!!!!!!!!!!!!\n");
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
printf("pos: %d; blockIdx.x: %d; blockDim.x: %d; threadIdx.x: %d; %s@%d\n", pos, blockIdx.x, blockDim.x, threadIdx.x, __FILE__, __LINE__);
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
printf("scan4Exclusive result: %d,%d,%d,%d\n", odata4.x, odata4.y, odata4.z, odata4.w);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if(pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] +
d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if(pos < N)
d_Buf[pos] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
){
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 0)
buf = d_Buffer[blockIdx.x];
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 64 * 1048576;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
extern "C" void initScan(void){
cutilSafeCall( hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)) );
}
extern "C" void closeScan(void){
cutilSafeCall( hipFree(d_Buf) );
}
static uint factorRadix2(uint& log2L, uint L){
if(!L){
log2L = 0;
return 0;
} else {
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor){
return ( (dividend % divisor) == 0 ) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
printf("(batchSize * arrayLength) %% (4 * THREADBLOCK_SIZE) = (%d * %d) %% (4 * %d) = %d\n", batchSize, arrayLength, THREADBLOCK_SIZE, (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE));
//Check all threadblocks to be fully packed with data
assert( (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE) == 0 );
printf("grid dim: %d; block dim: %d\n", ((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp( (batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE );
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE),
arrayLength / (4 * THREADBLOCK_SIZE)
);
cutilCheckMsg("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
cutilCheckMsg("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
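//--------------------------------------------------------------------------------
//Hypothetical host-side usage sketch (added for clarity, not part of the original
//sample). It shows the expected call sequence initScan() -> scanExclusiveLarge() ->
//closeScan() for a single batch of MIN_LARGE_ARRAY_SIZE all-one elements, whose
//exclusive scan is simply 0, 1, 2, ... The helper name exampleScanUsage() and the
//chosen input are illustrative assumptions only.
extern "C" void exampleScanUsage(void){
    const uint N = 8 * THREADBLOCK_SIZE; //= MIN_LARGE_ARRAY_SIZE, smallest size accepted by the large-array path
    static uint h_Src[8 * THREADBLOCK_SIZE], h_Dst[8 * THREADBLOCK_SIZE];
    for(uint i = 0; i < N; i++)
        h_Src[i] = 1;
    uint *d_Src, *d_Dst;
    cutilSafeCall( hipMalloc((void **)&d_Src, N * sizeof(uint)) );
    cutilSafeCall( hipMalloc((void **)&d_Dst, N * sizeof(uint)) );
    cutilSafeCall( hipMemcpy(d_Src, h_Src, N * sizeof(uint), hipMemcpyHostToDevice) );
    initScan(); //allocates the internal d_Buf used by scanExclusiveLarge()
    scanExclusiveLarge(d_Dst, d_Src, 1, N); //batchSize = 1, arrayLength = N
    closeScan();
    cutilSafeCall( hipMemcpy(h_Dst, d_Dst, N * sizeof(uint), hipMemcpyDeviceToHost) );
    cutilSafeCall( hipFree(d_Src) );
    cutilSafeCall( hipFree(d_Dst) );
}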
| 25ac2e350bf802be4929970244e090161207f5be.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
#include "scan_common.h"
#include <cuda.h>
//All three kernels run THREADBLOCK_SIZE threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 256
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(0)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1){
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE
inline __device__ uint warpScanInclusive(uint idata, volatile uint *s_Data, uint size) {
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1) {
s_Data[pos] += s_Data[pos - offset];
//printf("%d; s_Data[%d] += s_Data[%d - %d = %d]; %d += %d\n", threadIdx.x, pos, pos, offset, pos - offset, s_Data[pos], s_Data[pos - offset]);
}
return s_Data[pos];
}
inline __device__ uint warpScanExclusive(uint idata, volatile uint *s_Data, uint size){
return warpScanInclusive(idata, s_Data, size) - idata;
}
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
uint warpResult = warpScanInclusive(idata, s_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) ) {
printf("%d; s_Data[%d]: %d XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n", threadIdx.x, threadIdx.x >> LOG2_WARP_SIZE, warpResult);
s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (THREADBLOCK_SIZE / WARP_SIZE) ) {
//grab top warp elements
uint val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
printf("calling warpScanExclusive(%d, s_Data, %d)\n", val, size >> LOG2_WARP_SIZE);
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> LOG2_WARP_SIZE];
} else {
return warpScanInclusive(idata, s_Data, size);
}
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#endif
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size){
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
printf("calling scan1Exclusive(%d, s_Data, %d / 4 = %d)\n", idata4.w, size, size / 4);
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size) {
printf("calling scan4Inclusive( (%d, %d, %d, %d), s_Data, %d)\n", idata4.x, idata4.y, idata4.z, idata4.w, size);
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
printf("scanExclusiveShared !!!!!!!!!!!!!!!!!!\n");
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
printf("pos: %d; blockIdx.x: %d; blockDim.x: %d; threadIdx.x: %d; %s@%d\n", pos, blockIdx.x, blockDim.x, threadIdx.x, __FILE__, __LINE__);
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
printf("scan4Exclusive result: %d,%d,%d,%d\n", odata4.x, odata4.y, odata4.z, odata4.w);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if(pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] +
d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if(pos < N)
d_Buf[pos] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
){
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 0)
buf = d_Buffer[blockIdx.x];
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 64 * 1048576;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
extern "C" void initScan(void){
cutilSafeCall( cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)) );
}
extern "C" void closeScan(void){
cutilSafeCall( cudaFree(d_Buf) );
}
static uint factorRadix2(uint& log2L, uint L){
if(!L){
log2L = 0;
return 0;
} else {
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor){
return ( (dividend % divisor) == 0 ) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
printf("(batchSize * arrayLength) %% (4 * THREADBLOCK_SIZE) = (%d * %d) %% (4 * %d) = %d\n", batchSize, arrayLength, THREADBLOCK_SIZE, (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE));
//Check all threadblocks to be fully packed with data
assert( (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE) == 0 );
printf("grid dim: %d; block dim: %d\n", ((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), THREADBLOCK_SIZE);
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp( (batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE );
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE),
arrayLength / (4 * THREADBLOCK_SIZE)
);
cutilCheckMsg("scanExclusiveShared2() execution FAILED\n");
uniformUpdate<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
cutilCheckMsg("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
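//--------------------------------------------------------------------------------
//Hypothetical host-side usage sketch (added for clarity, not part of the original
//sample). It shows the expected call sequence initScan() -> scanExclusiveLarge() ->
//closeScan() for a single batch of MIN_LARGE_ARRAY_SIZE all-one elements, whose
//exclusive scan is simply 0, 1, 2, ... The helper name exampleScanUsage() and the
//chosen input are illustrative assumptions only.
extern "C" void exampleScanUsage(void){
    const uint N = 8 * THREADBLOCK_SIZE; //= MIN_LARGE_ARRAY_SIZE, smallest size accepted by the large-array path
    static uint h_Src[8 * THREADBLOCK_SIZE], h_Dst[8 * THREADBLOCK_SIZE];
    for(uint i = 0; i < N; i++)
        h_Src[i] = 1;
    uint *d_Src, *d_Dst;
    cutilSafeCall( cudaMalloc((void **)&d_Src, N * sizeof(uint)) );
    cutilSafeCall( cudaMalloc((void **)&d_Dst, N * sizeof(uint)) );
    cutilSafeCall( cudaMemcpy(d_Src, h_Src, N * sizeof(uint), cudaMemcpyHostToDevice) );
    initScan(); //allocates the internal d_Buf used by scanExclusiveLarge()
    scanExclusiveLarge(d_Dst, d_Src, 1, N); //batchSize = 1, arrayLength = N
    closeScan();
    cutilSafeCall( cudaMemcpy(h_Dst, d_Dst, N * sizeof(uint), cudaMemcpyDeviceToHost) );
    cutilSafeCall( cudaFree(d_Src) );
    cutilSafeCall( cudaFree(d_Dst) );
}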
|
3b523a26171a2136aada2ed912ccff928d421041.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Parallels and Distributed Systems Exercise 3
* v3. CUDA modified Ising model evolution; each block uses its threads' shared memory.
* Author:Michael Karatzas
* AEM:9137
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ising.h"
#include "essentials.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
//The max threads per block for my gpu (gt 540m) is 1024 so it must be BLOCK_DIM_X* BLOCK_DIM_Y<=1024
//(Preferably:set BLOCK_DIM_X and BLOCK_DIM_Y a multiple of 4)
#define BLOCK_DIM_X 24
#define BLOCK_DIM_Y 24
#define GRID_DIM_X 4
#define GRID_DIM_Y 4
#define RADIUS 2
//Functions'-kernels' Declarations
__global__
void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag);
__device__ __forceinline__
void getTheSpin(int * Lat,int * newLat, double * weights , int n, int lRowIndex,
int lColIndex,int gRowIndex,int gColIndex, int * flag);
///Functions'-kernels' Definitions
void ising( int *G, double *w, int k, int n){
//Flag to indicate whether there were no changes in the lattice during a step, in order to terminate the evolution early.
int no_changes_flag;
int * d_G,*d_secondG, *d_no_changes_flag;
double * d_w;
//Allocate memory for the no changes flag in the Device
if( hipMalloc(&d_no_changes_flag, (size_t)sizeof(int)) != hipSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
//Allocate memory and "transfer" the G Matrix in the Device
if( hipMalloc((void **)&d_G, (size_t)sizeof(int)*n*n) != hipSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
hipMemcpy(d_G, G, (size_t)sizeof(int)*n*n, hipMemcpyHostToDevice);
//Allocate memory and "transfer" the Weights Matrix in the Device
if( hipMalloc((void **)&d_w, (size_t)sizeof(double)*5*5) != hipSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
hipMemcpy(d_w, w, (size_t)sizeof(double)*5*5, hipMemcpyHostToDevice);
//Allocate memory for the second G matrix only in GPU(device)
if(hipMalloc((void **)&d_secondG, (size_t)sizeof(int)*n*n) != hipSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
//Setting block's and grid's dimensions
dim3 dimBlock(BLOCK_DIM_X,BLOCK_DIM_Y);
dim3 dimGrid(GRID_DIM_X,GRID_DIM_Y);
//Evolving the model for k steps
for(int i=0 ; i<k ;i++){
/*no_changes_flag=1 indicates no change in the lattice; if there are changes,
the nextStateCalculation() kernel will update its value.*/
no_changes_flag=1;
hipMemcpy(d_no_changes_flag, &no_changes_flag, (size_t)sizeof(int), hipMemcpyHostToDevice);
//calling the nextStateCalculation() kernel
hipLaunchKernelGGL(( nextStateCalculation), dim3(dimGrid),dim3(dimBlock), 0, 0, d_G,d_secondG,d_w,n,d_no_changes_flag);
hipDeviceSynchronize();
//Swapping the pointers between the two Matrices in device
pointer_swap(&d_G,&d_secondG);
//The host gets the value of the no-changes flag as an indication of whether any changes happened during the step.
hipMemcpy(&no_changes_flag, d_no_changes_flag, (size_t)sizeof(int), hipMemcpyDeviceToHost);
//If there are no changes in the lattice we stop evolving the model
if(no_changes_flag){
break;
}
}
//Passing updated values of G matrix in the host(CPU).
hipMemcpy(G,d_G,(size_t)sizeof(int)*n*n,hipMemcpyDeviceToHost);
//Freeing memory space I don't need from GPU to avoid memory leaks.
hipFree(d_G);
hipFree(d_secondG);
hipFree(d_w);
}
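//--------------------------------------------------------------------------------
//Hypothetical usage sketch (added for clarity, not part of the original assignment
//code): it builds a random n x n lattice of +/-1 spins and a uniform 5x5 weight
//matrix, then evolves it on the GPU for k steps. The helper name exampleIsingUsage()
//and the uniform 0.04 weights are illustrative assumptions only.
void exampleIsingUsage(void){
    int n = 96, k = 10;
    int *G = (int *) malloc((size_t)n * n * sizeof(int));
    double *w = (double *) malloc(5 * 5 * sizeof(double));
    for(int i = 0; i < n * n; i++)
        G[i] = (rand() % 2) ? 1 : -1; //random +/-1 spins
    for(int i = 0; i < 25; i++)
        w[i] = 0.04; //placeholder weights; the central w[12] is skipped by getTheSpin()
    ising(G, w, k, n); //evolve the model for k steps
    free(G);
    free(w);
}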
__global__
void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag){
/* The part of the G matrix that needs to be read into the block's shared memory
consists of the spots whose spins will be computed by this block, plus a
two-spot "offset" border around every edge spot, which is needed for the spin
computation because those cells are neighbors of the edge spots. */
//The part of the G matrix that will be stored in shared memory.
__shared__ int sharedGpart[(BLOCK_DIM_X+2*RADIUS) * (BLOCK_DIM_Y+2*RADIUS)];
//The number of columns of the shared G part
int sharedNcols=(BLOCK_DIM_X+2*RADIUS) ;
//matrix to store the shared weight matrices
__shared__ double w_shared[25];
//The step of each thread
int strideX = blockDim.x *gridDim.x ;
int strideY = blockDim.y *gridDim.y ;
//The unique global indices of the threads in the grid
int gIndex_X = threadIdx.x +blockDim.x*blockIdx.x;//global x index
int gIndex_Y = threadIdx.y +blockDim.y*blockIdx.y;//global y index
//The local (in the block) Index
int lIndex_X=threadIdx.x+RADIUS;//local(in the block) x index
int lIndex_Y=threadIdx.y+RADIUS;//local(in the block) y index
//Accessing the spins in the global lattice and pass them in the shared matrix.
for(int i=gIndex_Y; i<n +RADIUS ;i+=strideY){
for(int j=gIndex_X; j<n +RADIUS;j+=strideX){
//Every thread reads its own element into shared memory
sharedGpart[lIndex_Y*sharedNcols+lIndex_X]=Gptr[( (i + n)%n )*n + ( (j + n)%n )];
//Access and read into shared memory the 2 left and 2 right "offset" elements of each row
if((threadIdx.x)<RADIUS){
int sharedGAccessorX= (lIndex_Y)*sharedNcols+(lIndex_X -RADIUS);
int GAccessorX=( (i + n)%n )*n+ ( ( (j-RADIUS) + n) % n);
sharedGpart[sharedGAccessorX]=Gptr[GAccessorX];
sharedGAccessorX=(lIndex_Y)*sharedNcols+(lIndex_X+BLOCK_DIM_X);
GAccessorX=( (i + n)%n )*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedGAccessorX]=Gptr[GAccessorX];
//Accessing and read in shared memory "corner offset" elements(each corner has 4 elements)
if((threadIdx.y)<RADIUS){
//1st corner (4 points, up and left)
int sharedDiagAccessorX= (lIndex_Y -RADIUS)*sharedNcols +(lIndex_X-RADIUS);
int GDiagAccessorX=( ( (i-RADIUS) + n) % n)*n+( ( (j-RADIUS) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//2nd corner (4 points, down and left)
sharedDiagAccessorX= (lIndex_Y+BLOCK_DIM_Y)*sharedNcols +(lIndex_X-RADIUS);
GDiagAccessorX=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( ( (j-RADIUS) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//3rd corner (4 points, down and right)
sharedDiagAccessorX= (lIndex_Y+BLOCK_DIM_Y)*sharedNcols +(lIndex_X+BLOCK_DIM_X);
GDiagAccessorX=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//4th corner (4 points, up and right)
sharedDiagAccessorX= (lIndex_Y -RADIUS)*sharedNcols+(lIndex_X+BLOCK_DIM_X);
GDiagAccessorX=( ( (i-RADIUS) + n) % n)*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
}
}
//Access and read into shared memory the 2 top and 2 bottom "offset" elements of each column
if((threadIdx.y)<RADIUS){
int sharedGAccessorY= (lIndex_Y-RADIUS)*sharedNcols+lIndex_X;
int GAccessorY=( ( (i-RADIUS) + n) % n)*n+( (j + n)%n );
sharedGpart[sharedGAccessorY]=Gptr[GAccessorY];
sharedGAccessorY=(lIndex_Y+BLOCK_DIM_Y)*sharedNcols+lIndex_X;
GAccessorY=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( (j + n)%n );
sharedGpart[sharedGAccessorY]=Gptr[GAccessorY];
}
/*If (BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5), we also use shared memory for the weights matrix.
I didn't implement it for smaller dimensions because the benefit is very small anyway and it
would make the code more complex. Also, we choose BLOCK_DIM_X = BLOCK_DIM_Y = 24, and BLOCK_DIM_X<5 or
BLOCK_DIM_Y<5 aren't used in practice, so we get the small benefit of transferring the weights matrix. */
if((BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5)){
if(threadIdx.x<5 &&threadIdx.y<5)
w_shared[threadIdx.x*5+ threadIdx.y]=w[threadIdx.x*5+ threadIdx.y];
}
//Here we synchronize the block threads so that the shared G values and the
//w values are visible to every thread before they are used
__syncthreads();
if((i<n)&&(j<n)){
if((BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5))
getTheSpin(sharedGpart,newMat, w_shared,n,lIndex_Y, lIndex_X,i,j,flag);
else //if((BLOCK_DIM_Y<5) || (BLOCK_DIM_X<5))
getTheSpin(sharedGpart,newMat, w,n,lIndex_Y, lIndex_X,i,j,flag);
}
__syncthreads();
}
}
}
__device__ __forceinline__
void getTheSpin(int * Lat,int * newLat, double * weights , int n, int lRowIndex,int lColIndex,
int gRowIndex,int gColIndex, int * flag ){
double total=0;
//Calculating the Total influence for a certain spot, by scanning the block shared part of G.
for(int i=lRowIndex-2;i<lRowIndex+3;i++ ){
for(int j=lColIndex-2;j<lColIndex+3;j++ ){
if((i==lRowIndex) && (j==lColIndex))
continue;
//Total influence update
total+=Lat[ i*(BLOCK_DIM_X+2*RADIUS) + j] *weights[(2+i-lRowIndex)*5 + (2+j-lColIndex)];
}
}
//Checking the conditions in order to get the next state spin
// if (total == 0), taking into account possible floating point errors
if( (total<1e-6) && (total>(-1e-6)) ){
newLat[(gRowIndex)*n+(gColIndex)]=Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex];
}
//If a change happens at a certain spot, we update the no-changes flag's value to 0.
else if(total<0){
//Checking if there is change in this certain spot
if(Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex]!=-1) //the spin flips to -1 only if it was +1
*flag=0;
newLat[(gRowIndex)*n+(gColIndex)]=-1;
}
else if(total>0){
//Checking if there is change in this certain spot
if(Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex]!=1)
*flag=0;
newLat[(gRowIndex)*n+(gColIndex)]=1;
}
}
| 3b523a26171a2136aada2ed912ccff928d421041.cu | /*
* Parallels and Distributed Systems Exercise 3
* v3. CUDA modified Ising model evolution; each block uses its threads' shared memory.
* Author:Michael Karatzas
* AEM:9137
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ising.h"
#include "essentials.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
//The max threads per block for my gpu (gt 540m) is 1024 so it must be BLOCK_DIM_X* BLOCK_DIM_Y<=1024
//(Preferably:set BLOCK_DIM_X and BLOCK_DIM_Y a multiple of 4)
#define BLOCK_DIM_X 24
#define BLOCK_DIM_Y 24
#define GRID_DIM_X 4
#define GRID_DIM_Y 4
#define RADIUS 2
//Functions'-kernels' Declarations
__global__
void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag);
__device__ __forceinline__
void getTheSpin(int * Lat,int * newLat, double * weights , int n, int lRowIndex,
int lColIndex,int gRowIndex,int gColIndex, int * flag);
///Functions'-kernels' Definitions
void ising( int *G, double *w, int k, int n){
//Flag to indicate whether there were no changes in the lattice during a step, in order to terminate the evolution early.
int no_changes_flag;
int * d_G,*d_secondG, *d_no_changes_flag;
double * d_w;
//Allocate memory for the no changes flag in the Device
if( cudaMalloc(&d_no_changes_flag, (size_t)sizeof(int)) != cudaSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
//Allocate memory and "transfer" the G Matrix in the Device
if( cudaMalloc((void **)&d_G, (size_t)sizeof(int)*n*n) != cudaSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
cudaMemcpy(d_G, G, (size_t)sizeof(int)*n*n, cudaMemcpyHostToDevice);
//Allocate memory and "transfer" the Weights Matrix in the Device
if( cudaMalloc((void **)&d_w, (size_t)sizeof(double)*5*5) != cudaSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
cudaMemcpy(d_w, w, (size_t)sizeof(double)*5*5, cudaMemcpyHostToDevice);
//Allocate memory for the second G matrix only in GPU(device)
if(cudaMalloc((void **)&d_secondG, (size_t)sizeof(int)*n*n) != cudaSuccess){
printf("Couldn't allocate memory in device (GPU) !");
exit(1);
}
//Setting block's and grid's dimensions
dim3 dimBlock(BLOCK_DIM_X,BLOCK_DIM_Y);
dim3 dimGrid(GRID_DIM_X,GRID_DIM_Y);
//Evolving the model for k steps
for(int i=0 ; i<k ;i++){
/*no_changes_flag=1 indicates no change in the lattice; if there are changes,
the nextStateCalculation() kernel will update its value.*/
no_changes_flag=1;
cudaMemcpy(d_no_changes_flag, &no_changes_flag, (size_t)sizeof(int), cudaMemcpyHostToDevice);
//calling the nextStateCalculation() kernel
nextStateCalculation<<<dimGrid,dimBlock>>>(d_G,d_secondG,d_w,n,d_no_changes_flag);
cudaDeviceSynchronize();
//Swapping the pointers between the two Matrices in device
pointer_swap(&d_G,&d_secondG);
//The host gets the value of the no-changes flag as an indication of whether any changes happened during the step.
cudaMemcpy(&no_changes_flag, d_no_changes_flag, (size_t)sizeof(int), cudaMemcpyDeviceToHost);
//If there are no changes in the lattice we stop evolving the model
if(no_changes_flag){
break;
}
}
//Passing updated values of G matrix in the host(CPU).
cudaMemcpy(G,d_G,(size_t)sizeof(int)*n*n,cudaMemcpyDeviceToHost);
//Freeing memory space I don't need from GPU to avoid memory leaks.
cudaFree(d_G);
cudaFree(d_secondG);
cudaFree(d_w);
}
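//--------------------------------------------------------------------------------
//Hypothetical usage sketch (added for clarity, not part of the original assignment
//code): it builds a random n x n lattice of +/-1 spins and a uniform 5x5 weight
//matrix, then evolves it on the GPU for k steps. The helper name exampleIsingUsage()
//and the uniform 0.04 weights are illustrative assumptions only.
void exampleIsingUsage(void){
    int n = 96, k = 10;
    int *G = (int *) malloc((size_t)n * n * sizeof(int));
    double *w = (double *) malloc(5 * 5 * sizeof(double));
    for(int i = 0; i < n * n; i++)
        G[i] = (rand() % 2) ? 1 : -1; //random +/-1 spins
    for(int i = 0; i < 25; i++)
        w[i] = 0.04; //placeholder weights; the central w[12] is skipped by getTheSpin()
    ising(G, w, k, n); //evolve the model for k steps
    free(G);
    free(w);
}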
__global__
void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag){
/* The part of the G matrix that needs to be read into the block's shared memory
consists of the spots whose spins will be computed by this block, plus a
two-spot "offset" border around every edge spot, which is needed for the spin
computation because those cells are neighbors of the edge spots. */
//The part of the G matrix that will be stored in shared memory.
__shared__ int sharedGpart[(BLOCK_DIM_X+2*RADIUS) * (BLOCK_DIM_Y+2*RADIUS)];
//The number of columns of the shared G part
int sharedNcols=(BLOCK_DIM_X+2*RADIUS) ;
//matrix to store the shared weight matrices
__shared__ double w_shared[25];
//The step of each thread
int strideX = blockDim.x *gridDim.x ;
int strideY = blockDim.y *gridDim.y ;
//The unique global indices of the threads in the grid
int gIndex_X = threadIdx.x +blockDim.x*blockIdx.x;//global x index
int gIndex_Y = threadIdx.y +blockDim.y*blockIdx.y;//global y index
//The local (in the block) Index
int lIndex_X=threadIdx.x+RADIUS;//local(in the block) x index
int lIndex_Y=threadIdx.y+RADIUS;//local(in the block) y index
//Accessing the spins in the global lattice and pass them in the shared matrix.
for(int i=gIndex_Y; i<n +RADIUS ;i+=strideY){
for(int j=gIndex_X; j<n +RADIUS;j+=strideX){
//Every thread reads its own element into shared memory
sharedGpart[lIndex_Y*sharedNcols+lIndex_X]=Gptr[( (i + n)%n )*n + ( (j + n)%n )];
//Access and read into shared memory the 2 left and 2 right "offset" elements of each row
if((threadIdx.x)<RADIUS){
int sharedGAccessorX= (lIndex_Y)*sharedNcols+(lIndex_X -RADIUS);
int GAccessorX=( (i + n)%n )*n+ ( ( (j-RADIUS) + n) % n);
sharedGpart[sharedGAccessorX]=Gptr[GAccessorX];
sharedGAccessorX=(lIndex_Y)*sharedNcols+(lIndex_X+BLOCK_DIM_X);
GAccessorX=( (i + n)%n )*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedGAccessorX]=Gptr[GAccessorX];
//Accessing and read in shared memory "corner offset" elements(each corner has 4 elements)
if((threadIdx.y)<RADIUS){
//1st corner (4 points, up and left)
int sharedDiagAccessorX= (lIndex_Y -RADIUS)*sharedNcols +(lIndex_X-RADIUS);
int GDiagAccessorX=( ( (i-RADIUS) + n) % n)*n+( ( (j-RADIUS) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//2nd corner (4 points, down and left)
sharedDiagAccessorX= (lIndex_Y+BLOCK_DIM_Y)*sharedNcols +(lIndex_X-RADIUS);
GDiagAccessorX=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( ( (j-RADIUS) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//3rd corner (4 points, down and right)
sharedDiagAccessorX= (lIndex_Y+BLOCK_DIM_Y)*sharedNcols +(lIndex_X+BLOCK_DIM_X);
GDiagAccessorX=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
//4th corner (4 points, up and right)
sharedDiagAccessorX= (lIndex_Y -RADIUS)*sharedNcols+(lIndex_X+BLOCK_DIM_X);
GDiagAccessorX=( ( (i-RADIUS) + n) % n)*n+( ( (j+BLOCK_DIM_X) + n) % n);
sharedGpart[sharedDiagAccessorX]=Gptr[GDiagAccessorX];
}
}
//Access and read into shared memory the 2 top and 2 bottom "offset" elements of each column
if((threadIdx.y)<RADIUS){
int sharedGAccessorY= (lIndex_Y-RADIUS)*sharedNcols+lIndex_X;
int GAccessorY=( ( (i-RADIUS) + n) % n)*n+( (j + n)%n );
sharedGpart[sharedGAccessorY]=Gptr[GAccessorY];
sharedGAccessorY=(lIndex_Y+BLOCK_DIM_Y)*sharedNcols+lIndex_X;
GAccessorY=( ( (i+BLOCK_DIM_Y) + n) % n)*n+( (j + n)%n );
sharedGpart[sharedGAccessorY]=Gptr[GAccessorY];
}
/*If (BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5), we also use shared memory for the weights matrix.
I didn't implement it for smaller dimensions because the benefit is very small anyway and it
would make the code more complex. Also, we choose BLOCK_DIM_X = BLOCK_DIM_Y = 24, and BLOCK_DIM_X<5 or
BLOCK_DIM_Y<5 aren't used in practice, so we get the small benefit of transferring the weights matrix. */
if((BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5)){
if(threadIdx.x<5 &&threadIdx.y<5)
w_shared[threadIdx.x*5+ threadIdx.y]=w[threadIdx.x*5+ threadIdx.y];
}
//Here we synchronize the block threads so that the shared G values and the
//w values are visible to every thread before they are used
__syncthreads();
if((i<n)&&(j<n)){
if((BLOCK_DIM_Y>=5) && (BLOCK_DIM_X>=5))
getTheSpin(sharedGpart,newMat, w_shared,n,lIndex_Y, lIndex_X,i,j,flag);
else //if((BLOCK_DIM_Y<5) || (BLOCK_DIM_X<5))
getTheSpin(sharedGpart,newMat, w,n,lIndex_Y, lIndex_X,i,j,flag);
}
__syncthreads();
}
}
}
__device__ __forceinline__
void getTheSpin(int * Lat,int * newLat, double * weights , int n, int lRowIndex,int lColIndex,
int gRowIndex,int gColIndex, int * flag ){
double total=0;
//Calculating the Total influence for a certain spot, by scanning the block shared part of G.
for(int i=lRowIndex-2;i<lRowIndex+3;i++ ){
for(int j=lColIndex-2;j<lColIndex+3;j++ ){
if((i==lRowIndex) && (j==lColIndex))
continue;
//Total influence update
total+=Lat[ i*(BLOCK_DIM_X+2*RADIUS) + j] *weights[(2+i-lRowIndex)*5 + (2+j-lColIndex)];
}
}
//Checking the conditions in order to get the next state spin
// if (total == 0), taking into account possible floating point errors
if( (total<1e-6) && (total>(-1e-6)) ){
newLat[(gRowIndex)*n+(gColIndex)]=Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex];
}
//If a change happens at a certain spot, we update the no-changes flag's value to 0.
else if(total<0){
//Checking if there is change in this certain spot
if(Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex]!=-1) //the spin flips to -1 only if it was +1
*flag=0;
newLat[(gRowIndex)*n+(gColIndex)]=-1;
}
else if(total>0){
//Checking if there is change in this certain spot
if(Lat[lRowIndex*(BLOCK_DIM_X+2*RADIUS)+lColIndex]!=1)
*flag=0;
newLat[(gRowIndex)*n+(gColIndex)]=1;
}
}
|
5e178e6b10e80fb18b22291cf8a0b1da123e0dcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include <cstddef>
#include <cmath>
#include <limits>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
// the default value of `m` equals to `n`
return at::native::eye_out_cuda(result, n, n);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
AT_ASSERT(device_or_default(device_opt).type() == at::DeviceType::CUDA);
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = dtype_or_default(dtype_opt);
auto dtype_meta = scalarTypeToTypeMeta(dtype);
int64_t size_bytes = nelements * dtype_meta.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype_meta);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = memory_format_opt.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
auto t = at::native::empty_cuda({0}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()), "Expected a '", result.device(), "' generator device but found '", generator->device(), "'");
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"randperm of tensors larger than INT_MAX is not supported yet in pytorch");
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto range = at::arange(n, result.options());
auto keys_tmp = at::empty_like(keys);
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
scalar_t *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr<scalar_t>();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr<scalar_t>();
}
// Use the sorted order of keys to rearrange the result array
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
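// Note (added): the first SortPairs call below passes a null workspace pointer and
// only computes temp_storage_bytes; the second call performs the actual sort.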
hipcub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes,
keys.data_ptr<scalar_t>(), keys_tmp.data_ptr<scalar_t>(),
range.data_ptr<scalar_t>(), shuffled_data, n,
0, sizeof(scalar_t) * 8, at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(temp_storage_bytes);
hipcub::DeviceRadixSort::SortPairs(
dataPtr.get(), temp_storage_bytes,
keys.data_ptr<scalar_t>(), keys_tmp.data_ptr<scalar_t>(),
range.data_ptr<scalar_t>(), shuffled_data, n,
0, sizeof(scalar_t) * 8, at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
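//
// Worked example (added note, not in the original source): with f = 2 and x = 5 we
// get b = 2f - 1 = 3 and b^2 + 8x = 49, so row = floor((-3 + 7) / 2) = 2 and
// col = 5 - (2 + 2 + 2 - 1) * 2 / 2 = 0; i.e. x = 5 is the first element of the
// third trapezoid row (the rows hold 2, 3, 4, ... elements).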
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2 + row
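//
// Worked example (added note, not in the original source): with f = 3 and x = 4 we
// get b = -1 - 2f = -7 and b^2 - 8x = 17, so row = floor((7 - sqrt(17)) / 2) = 1 and
// col = 4 - (3 + 3 - 1 + 1) * 1 / 2 + 1 = 2; i.e. x = 4 is the last element of the
// second trapezoid row (the rows hold 3, 2, 1, ... elements).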
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
}} // namespace at::native
| 5e178e6b10e80fb18b22291cf8a0b1da123e0dcb.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <cub/cub.cuh>
#include <algorithm>
#include <cstddef>
#include <cmath>
#include <limits>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
// the default value of `m` equals to `n`
return at::native::eye_out_cuda(result, n, n);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
AT_ASSERT(device_or_default(device_opt).type() == at::DeviceType::CUDA);
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = dtype_or_default(dtype_opt);
auto dtype_meta = scalarTypeToTypeMeta(dtype);
int64_t size_bytes = nelements * dtype_meta.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
c10::StorageImpl::use_byte_size_t(),
size_bytes,
allocator->allocate(size_bytes),
allocator,
/*resizeable=*/true);
auto tensor =
detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDA, dtype_meta);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = memory_format_opt.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
auto t = at::native::empty_cuda({0}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()), "Expected a '", result.device(), "' generator device but found '", generator->device(), "'");
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
TORCH_CHECK(n <= std::numeric_limits<int>::max(),
"randperm of tensors larger than INT_MAX is not supported yet in pytorch");
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto range = at::arange(n, result.options());
auto keys_tmp = at::empty_like(keys);
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
scalar_t *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr<scalar_t>();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr<scalar_t>();
}
// Use the sorted order of keys to rearrange the result array
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
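// Note (added): the first SortPairs call below passes a null workspace pointer and
// only computes temp_storage_bytes; the second call performs the actual sort.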
cub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_bytes,
keys.data_ptr<scalar_t>(), keys_tmp.data_ptr<scalar_t>(),
range.data_ptr<scalar_t>(), shuffled_data, n,
0, sizeof(scalar_t) * 8, at::cuda::getCurrentCUDAStream());
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
auto dataPtr = allocator.allocate(temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(
dataPtr.get(), temp_storage_bytes,
keys.data_ptr<scalar_t>(), keys_tmp.data_ptr<scalar_t>(),
range.data_ptr<scalar_t>(), shuffled_data, n,
0, sizeof(scalar_t) * 8, at::cuda::getCurrentCUDAStream());
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
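//
// Worked example (added note, not in the original source): with f = 2 and x = 5 we
// get b = 2f - 1 = 3 and b^2 + 8x = 49, so row = floor((-3 + 7) / 2) = 2 and
// col = 5 - (2 + 2 + 2 - 1) * 2 / 2 = 0; i.e. x = 5 is the first element of the
// third trapezoid row (the rows hold 2, 3, 4, ... elements).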
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2 + row
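//
// Worked example (added note, not in the original source): with f = 3 and x = 4 we
// get b = -1 - 2f = -7 and b^2 - 8x = 17, so row = floor((7 - sqrt(17)) / 2) = 1 and
// col = 4 - (3 + 3 - 1 + 1) * 1 / 2 + 1 = 2; i.e. x = 4 is the last element of the
// second trapezoid row (the rows hold 3, 2, 1, ... elements).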
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
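// Illustrative example (added note, numbers not from the original source):
// for row = 5, col = 3, offset = -1 we get tril_size = 9, m_first_row = 1,
// trapezoid_row_offset = 1 and rectangle_row_offset = 4, so
// rectangle_size = (5 - 4) * 3 = 3 and the kernel's trapezoid covers the
// remaining 9 - 3 = 6 = 1 + 2 + 3 indices.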
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
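// Illustrative example (added note, numbers not from the original source):
// for row = 5, col = 3, offset = -1 we get triu_size = 15 - 6 = 9,
// m_first_row = col = 3 and rectangle_size = min(5, 1) * 3 = 3, leaving a
// bottom trapezoid of 9 - 3 = 6 = 3 + 2 + 1 indices for the kernel.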
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
}} // namespace at::native
|
a341b0cc6114c0c18f18d6170815e29129a59b56.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "common.h"
extern "C" {
#include "city.h"
}
hipStream_t myStream;
/** < Functions for hashing from within a CUDA kernel */
static const uint32_t cu_c1 = 0xcc9e2d51;
static const uint32_t cu_c2 = 0x1b873593;
__device__ uint32_t cu_Rotate32(uint32_t val, int shift)
{
return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
}
__device__ uint32_t cu_Mur(uint32_t a, uint32_t h)
{
a *= cu_c1;
a = cu_Rotate32(a, 17);
a *= cu_c2;
h ^= a;
h = cu_Rotate32(h, 19);
return h * 5 + 0xe6546b64;
}
__device__ uint32_t cu_fmix(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
__device__ uint32_t Hash32Len0to4(char *s, int len)
{
uint32_t b = 0;
uint32_t c = 9;
int i;
for(i = 0; i < len; i++) {
b = b * cu_c1 + s[i];
c ^= b;
}
return cu_fmix(cu_Mur(b, cu_Mur(len, c)));
}
/** < Hashing functions for CUDA kernels end here */
__global__ void
vectorAdd(int *pkts, int num_pkts)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_pkts) {
pkts[i] = Hash32Len0to4((char *) &pkts[i], 4);
}
}
double cpu_run(int *pkts, int num_pkts)
{
int i;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME, &start);
for(i = 0; i < num_pkts; i += 1) {
pkts[i] = CityHash32((char *) &pkts[i], 4);
}
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#if INCLUDE_COPY_TIME == 1
/**< Include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int num_pkts)
{
struct timespec start, end;
int err = hipSuccess;
clock_gettime(CLOCK_REALTIME, &start);
/**< Copy packets to device */
err = hipMemcpyAsync(d_pkts, h_pkts, num_pkts * sizeof(int),
hipMemcpyHostToDevice, myStream);
CPE(err != hipSuccess, "Failed to copy to device memory\n", -1);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, myStream, d_pkts,
num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Failed to launch vectorAdd kernel\n", -1);
/**< Copy back the results */
err = hipMemcpyAsync(h_pkts, d_pkts, num_pkts * sizeof(int),
hipMemcpyDeviceToHost, myStream);
CPE(err != hipSuccess, "Failed to copy C from device to host\n", -1);
/**< Wait for all stream ops to complete */
hipStreamSynchronize(myStream);
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#else
/**< Don't include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int num_pkts)
{
struct timespec start, end;
int err = hipSuccess;
/**< Copy packets to device */
err = hipMemcpy(d_pkts, h_pkts, num_pkts * sizeof(int),
hipMemcpyHostToDevice);
CPE(err != hipSuccess, "Failed to copy to device memory\n", -1);
/**< Memcpy has completed: start timer */
clock_gettime(CLOCK_REALTIME, &start);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pkts, num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Failed to launch vectorAdd kernel\n", -1);
hipDeviceSynchronize();
/**< Kernel execution finished: stop timer */
clock_gettime(CLOCK_REALTIME, &end);
/**< Copy back the results */
err = hipMemcpy(h_pkts, d_pkts, num_pkts * sizeof(int),
hipMemcpyDeviceToHost);
CPE(err != hipSuccess, "Failed to copy C from device to host\n", -1);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#endif
int main(int argc, char *argv[])
{
int err = hipSuccess;
int i;
int *h_pkts_cpu;
/** <Separate packet buffer to compare GPU's result with the CPU's */
int *h_pkts_gpu, *d_pkts_gpu;
srand(time(NULL));
printDeviceProperties();
/** <Initialize a cudaStream for async calls */
err = hipStreamCreate(&myStream);
CPE(err != hipSuccess, "Failed to create cudaStream\n", -1);
/** <Initialize the packet arrays for CPU and GPU code */
h_pkts_cpu = (int *) malloc(MAX_PKTS * sizeof(int));
/** <The host packet-array for GPU code should be pinned */
err = hipHostMalloc((void **) &h_pkts_gpu, MAX_PKTS * sizeof(int));
err = hipMalloc((void **) &d_pkts_gpu, MAX_PKTS * sizeof(int));
/** <Test for different batch sizes */
assert(MAX_PKTS % 128 == 0);
for(int num_pkts = 16; num_pkts < MAX_PKTS; num_pkts *= 4) {
double cpu_time = 0, gpu_time = 0;
/** <Initialize packets */
for(i = 0; i < num_pkts; i ++) {
h_pkts_cpu[i] = rand();
h_pkts_gpu[i] = h_pkts_cpu[i];
}
/** Perform several measurements for averaging */
for(i = 0; i < ITERS; i ++) {
cpu_time += cpu_run(h_pkts_cpu, num_pkts);
gpu_time += gpu_run(h_pkts_gpu, d_pkts_gpu, num_pkts);
}
cpu_time = cpu_time / ITERS;
gpu_time = gpu_time / ITERS;
/** <Verify that the result vector is correct */
for(int i = 0; i < num_pkts; i ++) {
if (h_pkts_cpu[i] != h_pkts_gpu[i]) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
fprintf(stderr, "CPU %d, GPU %d\n", h_pkts_cpu[i], h_pkts_gpu[i]);
exit(-1);
}
}
printf("Test PASSED for num_pkts = %d\n", num_pkts);
printf("num_pkts %d CPU %.2f GPU %.2f (million hashes per second)\n",
num_pkts,
num_pkts / (cpu_time * 1000000),
num_pkts / (gpu_time * 1000000));
/** <Emit the results to stderr. Use only space for delimiting */
fprintf(stderr, "Batch size %d CPU %f GPU %f CPU/GPU %f\n",
num_pkts, cpu_time, gpu_time, cpu_time / gpu_time);
printf("\n");
}
// Free device memory
hipFree(d_pkts_gpu);
// Free host memory
free(h_pkts_cpu);
hipHostFree(h_pkts_gpu);
// Reset the device and exit
err = hipDeviceReset();
CPE(err != hipSuccess, "Failed to de-initialize the device\n", -1);
printf("Done\n");
return 0;
}
| a341b0cc6114c0c18f18d6170815e29129a59b56.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <time.h>
#include "common.h"
extern "C" {
#include "city.h"
}
cudaStream_t myStream;
/** < Functions for hashing from within a CUDA kernel */
static const uint32_t cu_c1 = 0xcc9e2d51;
static const uint32_t cu_c2 = 0x1b873593;
__device__ uint32_t cu_Rotate32(uint32_t val, int shift)
{
return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
}
__device__ uint32_t cu_Mur(uint32_t a, uint32_t h)
{
a *= cu_c1;
a = cu_Rotate32(a, 17);
a *= cu_c2;
h ^= a;
h = cu_Rotate32(h, 19);
return h * 5 + 0xe6546b64;
}
__device__ uint32_t cu_fmix(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
__device__ uint32_t Hash32Len0to4(char *s, int len)
{
uint32_t b = 0;
uint32_t c = 9;
int i;
for(i = 0; i < len; i++) {
b = b * cu_c1 + s[i];
c ^= b;
}
return cu_fmix(cu_Mur(b, cu_Mur(len, c)));
}
/** < Hashing functions for CUDA kernels end here */
__global__ void
vectorAdd(int *pkts, int num_pkts)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_pkts) {
pkts[i] = Hash32Len0to4((char *) &pkts[i], 4);
}
}
double cpu_run(int *pkts, int num_pkts)
{
int i;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME, &start);
for(i = 0; i < num_pkts; i += 1) {
pkts[i] = CityHash32((char *) &pkts[i], 4);
}
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#if INCLUDE_COPY_TIME == 1
/**< Include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int num_pkts)
{
struct timespec start, end;
int err = cudaSuccess;
clock_gettime(CLOCK_REALTIME, &start);
/**< Copy packets to device */
err = cudaMemcpyAsync(d_pkts, h_pkts, num_pkts * sizeof(int),
cudaMemcpyHostToDevice, myStream);
CPE(err != cudaSuccess, "Failed to copy to device memory\n", -1);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
vectorAdd<<<blocksPerGrid, threadsPerBlock, 0, myStream>>>(d_pkts,
num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Failed to launch vectorAdd kernel\n", -1);
/**< Copy back the results */
err = cudaMemcpyAsync(h_pkts, d_pkts, num_pkts * sizeof(int),
cudaMemcpyDeviceToHost, myStream);
CPE(err != cudaSuccess, "Failed to copy C from device to host\n", -1);
/**< Wait for all stream ops to complete */
cudaStreamSynchronize(myStream);
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#else
/**< Don't include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int num_pkts)
{
struct timespec start, end;
int err = cudaSuccess;
/**< Copy packets to device */
err = cudaMemcpy(d_pkts, h_pkts, num_pkts * sizeof(int),
cudaMemcpyHostToDevice);
CPE(err != cudaSuccess, "Failed to copy to device memory\n", -1);
/**< Memcpy has completed: start timer */
clock_gettime(CLOCK_REALTIME, &start);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_pkts, num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Failed to launch vectorAdd kernel\n", -1);
cudaDeviceSynchronize();
/**< Kernel execution finished: stop timer */
clock_gettime(CLOCK_REALTIME, &end);
/**< Copy back the results */
err = cudaMemcpy(h_pkts, d_pkts, num_pkts * sizeof(int),
cudaMemcpyDeviceToHost);
CPE(err != cudaSuccess, "Failed to copy C from device to host\n", -1);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#endif
int main(int argc, char *argv[])
{
int err = cudaSuccess;
int i;
int *h_pkts_cpu;
/** <Separate packet buffer to compare GPU's result with the CPU's */
int *h_pkts_gpu, *d_pkts_gpu;
srand(time(NULL));
printDeviceProperties();
/** <Initialize a cudaStream for async calls */
err = cudaStreamCreate(&myStream);
CPE(err != cudaSuccess, "Failed to create cudaStream\n", -1);
/** <Initialize the packet arrays for CPU and GPU code */
h_pkts_cpu = (int *) malloc(MAX_PKTS * sizeof(int));
/** <The host packet-array for GPU code should be pinned */
err = cudaMallocHost((void **) &h_pkts_gpu, MAX_PKTS * sizeof(int));
err = cudaMalloc((void **) &d_pkts_gpu, MAX_PKTS * sizeof(int));
/** <Test for different batch sizes */
assert(MAX_PKTS % 128 == 0);
for(int num_pkts = 16; num_pkts < MAX_PKTS; num_pkts *= 4) {
double cpu_time = 0, gpu_time = 0;
/** <Initialize packets */
for(i = 0; i < num_pkts; i ++) {
h_pkts_cpu[i] = rand();
h_pkts_gpu[i] = h_pkts_cpu[i];
}
/** Perform several measurements for averaging */
for(i = 0; i < ITERS; i ++) {
cpu_time += cpu_run(h_pkts_cpu, num_pkts);
gpu_time += gpu_run(h_pkts_gpu, d_pkts_gpu, num_pkts);
}
cpu_time = cpu_time / ITERS;
gpu_time = gpu_time / ITERS;
/** <Verify that the result vector is correct */
for(int i = 0; i < num_pkts; i ++) {
if (h_pkts_cpu[i] != h_pkts_gpu[i]) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
fprintf(stderr, "CPU %d, GPU %d\n", h_pkts_cpu[i], h_pkts_gpu[i]);
exit(-1);
}
}
printf("Test PASSED for num_pkts = %d\n", num_pkts);
printf("num_pkts %d CPU %.2f GPU %.2f (million hashes per second)\n",
num_pkts,
num_pkts / (cpu_time * 1000000),
num_pkts / (gpu_time * 1000000));
/** <Emit the results to stderr. Use only space for delimiting */
fprintf(stderr, "Batch size %d CPU %f GPU %f CPU/GPU %f\n",
num_pkts, cpu_time, gpu_time, cpu_time / gpu_time);
printf("\n");
}
// Free device memory
cudaFree(d_pkts_gpu);
// Free host memory
free(h_pkts_cpu);
cudaFreeHost(h_pkts_gpu);
// Reset the device and exit
err = cudaDeviceReset();
CPE(err != cudaSuccess, "Failed to de-initialize the device\n", -1);
printf("Done\n");
return 0;
}
|
5e39de117f36d383783c64c49adcd2b4c18ebbfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cube(float * d_out, float * d_in){
// Todo: Fill in this function
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
} | 5e39de117f36d383783c64c49adcd2b4c18ebbfb.cu | #include "includes.h"
__global__ void cube(float * d_out, float * d_in){
// Todo: Fill in this function
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
} |
f1a885987219a0283acf01f203c6473f969c42d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Udacity HW 4
//Radix Sorting
__global__ void scanBlks(unsigned int *in, unsigned int *out, unsigned int n, unsigned int *blkSums)
{
extern __shared__ int blkData[];
int i1 = blockIdx.x * 2 * blockDim.x + threadIdx.x;
int i2 = i1 + blockDim.x;
if (i1 < n)
blkData[threadIdx.x] = in[i1];
if (i2 < n)
blkData[threadIdx.x + blockDim.x] = in[i2];
__syncthreads();
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1;
if (blkDataIdx < 2 * blockDim.x)
blkData[blkDataIdx] += blkData[blkDataIdx - stride];
__syncthreads();
}
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1 + stride;
if (blkDataIdx < 2 * blockDim.x)
blkData[blkDataIdx] += blkData[blkDataIdx - stride];
__syncthreads();
}
if (i1 < n)
out[i1] = blkData[threadIdx.x];
if (i2 < n)
out[i2] = blkData[threadIdx.x + blockDim.x];
if (blkSums != NULL && threadIdx.x == 0)
blkSums[blockIdx.x] = blkData[2 * blockDim.x - 1];
} | f1a885987219a0283acf01f203c6473f969c42d2.cu | #include "includes.h"
//Udacity HW 4
//Radix Sorting
__global__ void scanBlks(unsigned int *in, unsigned int *out, unsigned int n, unsigned int *blkSums)
{
extern __shared__ int blkData[];
int i1 = blockIdx.x * 2 * blockDim.x + threadIdx.x;
int i2 = i1 + blockDim.x;
if (i1 < n)
blkData[threadIdx.x] = in[i1];
if (i2 < n)
blkData[threadIdx.x + blockDim.x] = in[i2];
__syncthreads();
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1;
if (blkDataIdx < 2 * blockDim.x)
blkData[blkDataIdx] += blkData[blkDataIdx - stride];
__syncthreads();
}
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1 + stride;
if (blkDataIdx < 2 * blockDim.x)
blkData[blkDataIdx] += blkData[blkDataIdx - stride];
__syncthreads();
}
if (i1 < n)
out[i1] = blkData[threadIdx.x];
if (i2 < n)
out[i2] = blkData[threadIdx.x + blockDim.x];
if (blkSums != NULL && threadIdx.x == 0)
blkSums[blockIdx.x] = blkData[2 * blockDim.x - 1];
} |
2cc94c5eecaf916e8ab5da2cb42a5ec544a1b022.hip | // !!! This is a file automatically generated by hipify!!!
//STL includes
#include <iostream>
#include <vector>
#include <time.h>
#include <cmath>
#include <chrono>
#include <iomanip>
//Eigen includes
#include <Eigen/Dense>
#include <Eigen/Sparse>
//Boost
#include "boost/program_options.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
//My own includes
#include "global_params.h"
#include "input_file_prep.h"
#include "BZ_CUDA_UTIL.h"
#include "base_layer.h"
#include "gpu_info_struct.h"
#include "custom_kernels.h"
#include "model.h"
#include "fileHelper.h"
#include "Eigen_Util.h"
#include "model.hpp"
#include "base_layer.hpp"
#include "LSTM.hpp"
#include "softmax.hpp"
#include "Input_To_Hidden_Layer.hpp"
#include "Hidden_To_Hidden_Layer.hpp"
#include "LSTM_HH.hpp"
//parse the command line from the user
void command_line_parse(global_params ¶ms,int argc, char **argv) {
//files for keeping the user input
//if not s, 1st source, 2nd target, 3rd output weights name
//if s, 1st target, 2nd output weights name
std::vector<std::string> train_files;
//files for force decoding
//if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name
//if s, 1. target input file 2. neural network file name 3. output file name
std::vector<std::string> test_files;
//stuff for adaptive learning rate schedule
//if not seq , 1st is source dev, 2nd is target dev
//if seq 1st is target dev
std::vector<std::string> adaptive_learning_rate;
//lower and upper range for parameter initialization
std::vector<precision> lower_upper_range;
//for the kbest flag, 4 arguments must be entered for kbest, 1. number of best paths 2. input file name
//3. neural network file name (this is the output file you get after training the neural network) 4. output file name
std::vector<std::string> kbest_files;
//for stoic gen, 1st neural network file, 2nd is output file name
std::vector<std::string> stoicgen_files;
//truncated softmax
std::vector<std::string> trunc_info;
//for decoding ratios
std::vector<precision> decoding_ratio;
//for continuing to train
std::vector<std::string> cont_train;
//basic format setup
namespace po = boost::program_options;
po::options_description desc("Options");
desc.add_options()
("help,h", "Run to get help on how to use the program")
("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\
". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\
" \nFORMAT (if sequence): <target file name> <neural network output name>")
("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE)\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\
"FORMAT: (if seq): <target file name> <neural network file name>")
("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\
"FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>")
("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\
"FORMAT: <neural network file name> <output file name>")
("stoch-gen-len",po::value<int>(¶ms.sg_length) ,"How many sentences to let stoch-gen run for\n"\
"FORMAT: <num sentences>\n"
"DEFAULT: 100")
("temperature",po::value<double>(¶ms.temperature) ,"What should the temperature be for the stoch generation"\
"FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\
"DEFAULT: 1")
("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model")
("learning-rate,l",po::value<precision>(¶ms.learning_rate),"Set the learning rate\n DEFAULT: 0.7")
("longest-sent,L",po::value<int>(¶ms.longest_sent),"Set the maximum sentence length for training.\n DEFAULT: 100")
("hiddenstate-size,H",po::value<int>(¶ms.LSTM_size),"Set hiddenstate size \n DEFAULT: 1000")
("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\
"FORMAT: <shortlist size> <sampled size>")
("source-vocab,v",po::value<int>(¶ms.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus")
("target-vocab,V",po::value<int>(¶ms.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus")
("shuffle",po::value<bool>(¶ms.shuffle),"true if you want to shuffle the train data\n DEFAULT: true")
("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\
"FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08")
("number-epochs,n",po::value<int>(¶ms.num_epochs),"Set number of epochs\n DEFAULT: 10")
("clip-gradients,c",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5")
("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\
" when the perplexity on your specified dev set decreases from the previous half epoch by some constant, so "\
" new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n"
"FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\
"FORMAT: (if sequence): <target dev file name>")
("adaptive-decrease-factor,A",po::value<precision>(¶ms.decrease_factor),"To be used with adaptive-halve-lr"\
" it\n DEFAULT: 0.5")
("fixed-halve-lr",po::value<int> (¶ms.epoch_to_start_halving),"Halve the learning rate"\
" after a certain epoch, every half epoch afterwards by a specific amount")
("minibatch-size,m",po::value<int>(¶ms.minibatch_size),"Set minibatch size\n DEFAULT: 128")
("screen-print-rate",po::value<int>(¶ms.screen_print_rate),"Set after how many minibatched you want to print training info to the screen\n DEFAULT: 5")
("HPC-output",po::value<std::string>(¶ms.HPC_output_file_name),"Use if you want to have the terminal output also be put to a" \
"file \n FORMAT: <file name>")
("best-model,B",po::value<std::string>(¶ms.best_model_file_name),"During train have the best model be written to a file\nFORMAT: <output file name>")
("kbest,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get k best paths in sequence to sequence model\n"\
"FORMAT: <how many paths> <source file name> <neural network file name> <output file name>")
("beam-size,b",po::value<int>(¶ms.beam_size),"Set beam size for kbest paths\n DEFAULT: 12")
("penalty,p",po::value<precision>(¶ms.penalty),"Set penalty for kbest decoding. The value entered"\
" will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0")
("print-score",po::value<bool>(¶ms.print_score),"Set if you want to print out the unnormalized log prob for each path "\
"FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false")
("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations\n"\
"This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\
" and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\
"DEFAULT: 0.5, 1.5")
("Dump-LSTM",po::value<std::string>(¶ms.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\
"The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\
" 4.c_t 5.output gate 6.h_t 7.probabilities");
po::variables_map vm;
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
//see if the user specified the help flag
if ( vm.count("help") ) {
std::cout << "\n------------------------------\n";
std::cout << "This is Barret Zoph's GPU RNN library\n"
<< "The flags for the command line interface are below\n"
<< "" << "\n";
std::cout << desc << "\n";
exit (EXIT_FAILURE);
}
//error checks to be sure only once of these options is set
if (vm.count("train") && vm.count("kbest")) {
std::cout << "ERROR: you cannot train and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("train") && vm.count("force-decode")) {
std::cout << "ERROR: you cannot train and force-decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("force-decode") && vm.count("kbest")) {
std::cout << "ERROR: you cannot force-decode and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (!(vm.count("train") || vm.count("force-decode") || vm.count("kbest")||vm.count("stoch-gen") || vm.count("cont-train") )) {
std::cout << "ERROR: you must either train,continue training,get kbest,stoch generate data or force-decode\n";
exit (EXIT_FAILURE);
}
params.longest_sent+=4; //because it is really 4 less
if(vm.count("train") || vm.count("cont-train")) {
//some basic error checks to parameters
if(params.learning_rate<=0) {
std::cout << "ERROR: you cannot have a learning rate <=0\n";
exit (EXIT_FAILURE);
}
if(params.minibatch_size<=0) {
std::cout << "ERROR: you cannot have a minibatch of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.LSTM_size<=0) {
std::cout << "ERROR: you cannot have a hiddenstate of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.source_vocab_size<=0) {
if(params.source_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a source_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.target_vocab_size<=0) {
if(params.target_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a target_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.norm_clip<=0) {
std::cout << "ERROR: you cannot have your norm clip <=0\n";
exit (EXIT_FAILURE);
}
if(params.num_epochs<=0) {
std::cout << "ERROR: you cannot have num_epochs <=0\n";
exit (EXIT_FAILURE);
}
if(vm.count("HPC-output")) {
params.HPC_output = true;
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.train_file_name = params.unique_dir+"/train.txt";
if(vm.count("cont-train")) {
//sequence model
if(vm.count("sequence")) {
if(cont_train.size()!=2) {
std::cout << cont_train.size() << "\n";
std::cout << "ERROR: two arguements to be supplied to the continue train flag\n"\
" 1. train data file name, 2. neural network file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.target_file_name = cont_train[0];
params.input_weight_file = cont_train[1];
params.output_weight_file = cont_train[1];
params.LM = true;
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size);
}
else {
if(cont_train.size()!=3) {
std::cout << "ERROR: three arguements to be supplied to the continue train flag\n"\
" 1. source train data file name 2. target train data file name 3. neural network file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = cont_train[0];
params.target_file_name = cont_train[1];
params.input_weight_file = cont_train[2];
params.output_weight_file = cont_train[2];
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size);
}
}
else {
//now create the necessary files
if(vm.count("sequence")) {
if(train_files.size()!=2) {
std::cout << "ERROR: two arguements to be supplied to the train flag"\
" 1. train data file name, 2. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = true;
params.target_file_name = train_files[0];
params.output_weight_file = train_files[1];
input_file_prep input_helper;
//this outputs the train.txt file along with the mappings and first line
bool success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size);
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
else {
//then sequence to sequence model
if(train_files.size()!=3) {
std::cout << train_files.size() <<"\n";
std::cout << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\
" 1. source train data file name\n 2. target train data file name \n3. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = train_files[0];
params.target_file_name = train_files[1];
params.output_weight_file = train_files[2];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
bool success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size);
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
}
if(vm.count("parameter-range")) {
if(lower_upper_range.size()!=2) {
std::cout << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
precision temp_lower = lower_upper_range[0];
precision temp_upper = lower_upper_range[1];
if(temp_lower >= temp_upper) {
std::cout << "ERROR: the lower parameter range cannot be greater than the upper range\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.lower_range = temp_lower;
params.upper_range = temp_upper;
}
if(vm.count("fixed-halve-lr")) {
params.google_learning_rate = true;
if(params.epoch_to_start_halving<=0) {
std::cout << "ERROR: cannot halve learning rate until 1st epoch \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("adaptive-halve-lr")) {
params.learning_rate_schedule = true;
if(vm.count("sequence")) {
if(adaptive_learning_rate.size()!=1) {
std::cout << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_target_file_name = adaptive_learning_rate[0];
params.test_file_name = params.unique_dir + "/validation.txt";
input_file_prep input_helper;
input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size);
}
else {
if(adaptive_learning_rate.size()!=2) {
std::cout << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_source_file_name = adaptive_learning_rate[0];
params.dev_target_file_name = adaptive_learning_rate[1];
params.test_file_name = params.unique_dir + "/validation.txt";
if(params.dev_source_file_name == params.dev_target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name,
params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size);
}
if(vm.count("best-model")) {
params.best_model = true;
}
}
if(vm.count("truncated-softmax")) {
params.shortlist_size = std::stoi(trunc_info[0]);
params.sampled_size = std::stoi(trunc_info[1]);
params.truncated_softmax = true;
if(params.shortlist_size + params.sampled_size > params.target_vocab_size) {
std::cout << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train= true;
params.decode=false;
params.test = false;
params.stochastic_generation = false;
return;
}
if(vm.count("kbest")) {
if (kbest_files.size()!=4) {
std::cout << "ERROR: 4 arguements must be entered for kbest, 1. number of best paths"\
" 2 input file name "
" 3. neural network file name (this is the output file you get after training the neural network)"\
" 4. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.decode_file_name = params.unique_dir+"/decoder_input.txt";
params.decoder_output_file = params.unique_dir+"/decoder_output.txt";
params.num_hypotheses =std::stoi(kbest_files[0]);
params.decode_tmp_file = kbest_files[1];
params.input_weight_file = kbest_files[2];
params.decoder_final_file = kbest_files[3];
input_file_prep input_helper;
// input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt",
// params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size);
input_helper.integerize_file_kbest(params.input_weight_file,params.decode_tmp_file,params.decode_file_name,
params.longest_sent,params.LSTM_size,params.target_vocab_size,params.source_vocab_size);
if(params.beam_size<=0) {
std::cout << "ERROR: beam size cannot be <=0\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(params.penalty<0) {
std::cout << "ERROR: penalty cannot be less than zero\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
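//Illustrative note (numbers not from the original source): per the help text the penalty is added once per decoded target word,
//so a penalty of 0.2 adds 0.2*15 = 3.0 to the score of a 15-word hypothesis, nudging the decoder toward longer outputs.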
if(vm.count("Dump-LSTM")) {
params.dump_LSTM=true;
}
if(vm.count("dec-ratio")) {
if(decoding_ratio.size()!=2) {
std::cout << "Decoding ratio size: " << decoding_ratio.size() << "\n";
std::cout << decoding_ratio[0] << "\n";
std::cout << "ERROR: only two inputs for decoding ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.min_decoding_ratio = decoding_ratio[0];
params.max_decoding_ratio = decoding_ratio[1];
if(params.min_decoding_ratio >= params.max_decoding_ratio) {
std::cout << "ERROR: min decoding ratio must be <= max_decoding_ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
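//Illustrative note (numbers not from the original source): with the default ratios of 0.5 and 1.5,
//a 20-word source sentence constrains decoded targets to roughly 10 to 30 words.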
params.train = false;
params.decode = true;
params.test = false;
params.stochastic_generation = false;
params.LM = false;
return;
}
if(vm.count("force-decode")) {
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.test_file_name = params.unique_dir + "/validation.txt";
if(vm.count("sequence")) {
if(test_files.size()!=3) {
std::cout << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\
"2. neural network file name 3.output file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.target_file_name = test_files[0];
params.input_weight_file = test_files[1];
params.output_force_decode = test_files[2];
params.LM = true;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name,
params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size);
}
else {
if(test_files.size()!=4) {
std::cout << "ERROR: force-decode takes four arguements: 1. source input file"\
" 2. target input file 3. neural network file name 4. output file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = test_files[0];
params.target_file_name = test_files[1];
params.input_weight_file = test_files[2];
params.output_force_decode = test_files[3];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size);
}
params.train= false;
params.decode=false;
params.test = true;
params.minibatch_size=1;
params.stochastic_generation = false;
return;
}
if(vm.count("stoch-gen")) {
if(!vm.count("sequence")) {
std::cout << "ERROR: you can only do stoch-gen on the sequence model\n";
exit (EXIT_FAILURE);
}
if(stoicgen_files.size()!=2) {
std::cout << "ERROR: stoch-gen takes two inputs"\
" 1. neural network file name 2. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.sg_output_file_temp = params.unique_dir + "/sg.txt";
params.input_weight_file = stoicgen_files[0];
params.sg_output_file = stoicgen_files[1];
std::ifstream weights_file;
std::vector<std::string> info;
std::string str;
std::string word;
weights_file.open(params.input_weight_file.c_str());
weights_file.seekg(0, std::ios::beg);
std::getline(weights_file, str); //info from first sentence
std::istringstream iss(str, std::istringstream::in);
while(iss >> word) {
info.push_back(word);
}
weights_file.close();
params.LSTM_size = std::stoi(info[1]);
params.target_vocab_size = std::stoi(info[2]);
params.LM = true;
params.train= false;
params.decode = false;
params.test = false;
params.minibatch_size = 1;
params.stochastic_generation = true;
return;
}
}
catch(po::error& e) {
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
//std::cerr << desc << std::endl;
exit (EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
//Timing stuff
std::chrono::time_point<std::chrono::system_clock> start_total,
end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding;
std::chrono::duration<double> elapsed_seconds;
start_total = std::chrono::system_clock::now();
//Initializing the model
global_params params; //Declare all of the global parameters
//create tmp directory if it does not exist already
// if( !(boost::filesystem::exists("tmp/"))) {
// std::cout << "Creating tmp directory for program\n";
// boost::filesystem::create_directory("tmp/");
// }
//file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information
//get the command line arguements
command_line_parse(params,argc,argv);
neuralMT_model<precision> model; //This is the model
params.printIntroMessage();
BZ_CUDA::lower = params.lower_range;
BZ_CUDA::upper = params.upper_range;
if(params.google_learning_rate && params.learning_rate_schedule) {
std::cout << "ERROR: do not select both the fixed learning rate schedule and the perplexity based scheduler";
std::cout << "I Guarantee this is not what you intended to do\n";
exit (EXIT_FAILURE);
}
if(!params.decode) {
model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size,
params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip,
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.LM);
}
if(params.load_model_train) {
std::string temp_swap_weights = model.input_weight_file;
model.input_weight_file = params.load_model_name;
model.load_weights();
model.input_weight_file = temp_swap_weights;
}
std::ofstream HPC_output;
if(params.HPC_output) {
HPC_output.open("HPC_OUTPUT.txt");
}
////////////////////////////////////Train the model//////////////////////////////////////
if(params.train) {
//info for averaging the speed
int curr_batch_num_SPEED = 0;
const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever
int total_words_batch_SPEED = 0;
double total_batch_time_SPEED = 0;
//File info for the training file
file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax,
params.shortlist_size,params.sampled_size); //Initialize the file information
//model.initFileInfo(&file_info);
params.half_way_count = params.train_total_words/2;
if(params.google_learning_rate) {
std::cout << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
if(params.HPC_output) {
HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
HPC_output.flush();
}
}
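//Illustrative note (numbers not from the original source): with train_total_words = 2,000,000, half_way_count = 1,000,000,
//so once current_epoch reaches epoch_to_start_halving the rate is halved mid-epoch (when total_words passes 1,000,000)
//and halved again at each epoch boundary below.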
int current_epoch = 1;
std::cout << "Starting model training\n";
std::cout << "Starting epoch 1\n";
if(params.HPC_output) {
HPC_output << "Starting model training\n";
HPC_output << "Starting epoch 1\n";
HPC_output.flush();
}
//stuff for learning rate schedule
int total_words = 0;
precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate
bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs
double old_perplexity = 0;
model.train_perplexity = 0; //set the model perplexity to zero
while(current_epoch <= params.num_epochs) {
begin_minibatch = std::chrono::system_clock::now();
bool success = file_info.read_minibatch();
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
//std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n";
total_batch_time_SPEED+= elapsed_seconds.count();
begin_minibatch = std::chrono::system_clock::now();
//hipProfilerStart();
model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output,
file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output,
file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source,
file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target,
file_info.current_source_length,file_info.current_target_length,
file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad,
file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices,
file_info.len_unique_words_trunc_softmax);
// hipProfilerStop();
// return 0;
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
total_batch_time_SPEED+= elapsed_seconds.count();
total_words_batch_SPEED+=file_info.words_in_minibatch;
if(curr_batch_num_SPEED>=thres_batch_num_SPEED) {
std::cout << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
std::cout << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
std::cout << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
std::cout << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
if(params.HPC_output) {
HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
HPC_output.flush();
}
total_words_batch_SPEED = 0;
total_batch_time_SPEED = 0;
curr_batch_num_SPEED = 0;
}
curr_batch_num_SPEED++;
total_words += file_info.words_in_minibatch;
//stuff for google learning rate
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count &&
learning_rate_flag) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New Learning Rate: " << temp_learning_rate << "\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = false;
if(params.HPC_output) {
HPC_output << "New Learning Rate: " << temp_learning_rate << "\n";
HPC_output.flush();
}
}
//stuff for perplexity based learning schedule
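//Illustrative note (numbers not from the original source): with the default decrease_factor of 0.5, a dev perplexity that
//fails to beat the previous half-epoch's value by more than params.margin cuts a learning rate of 0.35 down to 0.175.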
if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) {
learning_rate_flag = false;
double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
old_perplexity = new_perplexity;
}
if(!success) {
current_epoch+=1;
//stuff for google learning rate schedule
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
double new_perplexity;
if(params.google_learning_rate || params.learning_rate_schedule) {
new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule) {
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
learning_rate_flag = true;
old_perplexity = new_perplexity;
}
if(params.train_perplexity) {
std::cout << "PData on train set:" << model.train_perplexity << "\n";
std::cout << "Total target words: " << file_info.total_target_words << "\n";
std::cout << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
if(params.HPC_output) {
HPC_output << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
HPC_output.flush();
}
model.train_perplexity = 0;
}
total_words=0;
if(current_epoch <= params.num_epochs) {
std::cout << "-----------------------------------" << std::endl;
std::cout << "Starting epoch " << current_epoch << std::endl;
std::cout << "-----------------------------------" << std::endl;
if(params.HPC_output) {
HPC_output << "-----------------------------------" << std::endl;
HPC_output << "Starting epoch " << current_epoch << std::endl;
HPC_output << "-----------------------------------" << std::endl;
HPC_output.flush();
}
}
}
hipDeviceSynchronize();
}
//Now that training is done, dump the weights
hipDeviceSynchronize();
model.dump_weights();
}
/////////////////////////////////Get perplexity on test set////////////////////////////////
if(params.test) {
model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,true,params.test_total_words,params.HPC_output,true,params.output_force_decode);
}
if(params.LM && params.stochastic_generation) {
model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature);
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false);
}
///////////////////////////////////////////decode the model////////////////////////////////////////////
if(params.decode) {
std::cout << "-----------------Starting Decoding----------------\n";
begin_decoding = std::chrono::system_clock::now();
model.beam_decoder(params.beam_size,params.decode_file_name,
params.input_weight_file,params.decode_num_lines_in_file,params.source_vocab_size,
params.target_vocab_size,params.longest_sent,params.LSTM_size,params.penalty,
params.decoder_output_file,params.min_decoding_ratio,params.max_decoding_ratio,params.softmax_scaled,
params.num_hypotheses,params.print_score,params.dump_LSTM,params.LSTM_dump_file);
end_decoding = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding;
std::cout << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n";
//now unintegerize the file
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.decoder_output_file,params.decoder_final_file,false,true);
}
//remove the temp directory created
if(params.unique_dir!="NULL") {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
}
//Compute the final runtime
end_total = std::chrono::system_clock::now();
elapsed_seconds = end_total-start_total;
std::cout << "\n\n\n";
std::cout << "Total Program Runtime: " << elapsed_seconds.count()/60.0 << " minutes" << std::endl;
}
| 2cc94c5eecaf916e8ab5da2cb42a5ec544a1b022.cu | //STL includes
#include <iostream>
#include <vector>
#include <time.h>
#include <cmath>
#include <chrono>
#include <iomanip>
//Eigen includes
#include <Eigen/Dense>
#include <Eigen/Sparse>
//Boost
#include "boost/program_options.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
//My own includes
#include "global_params.h"
#include "input_file_prep.h"
#include "BZ_CUDA_UTIL.h"
#include "base_layer.h"
#include "gpu_info_struct.h"
#include "custom_kernels.h"
#include "model.h"
#include "fileHelper.h"
#include "Eigen_Util.h"
#include "model.hpp"
#include "base_layer.hpp"
#include "LSTM.hpp"
#include "softmax.hpp"
#include "Input_To_Hidden_Layer.hpp"
#include "Hidden_To_Hidden_Layer.hpp"
#include "LSTM_HH.hpp"
//parse the command line from the user
void command_line_parse(global_params ¶ms,int argc, char **argv) {
//files for keeping the user input
//if not s, 1st source, 2nd target, 3rd output weights name
//if s, 1st target, 2nd output weights name
std::vector<std::string> train_files;
//files for force decoding
//if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name
//if s, 1. target input file 2. neural network file name 3. output file name
std::vector<std::string> test_files;
//stuff for adaptive learning rate schedule
//if not seq , 1st is source dev, 2nd is target dev
//if seq 1st is target dev
std::vector<std::string> adaptive_learning_rate;
//lower and upper range for parameter initialization
std::vector<precision> lower_upper_range;
	//for the kbest flag, 4 arguments must be entered for kbest, 1. number of best paths 2. input file name
	//3. neural network file name (this is the output file you get after training the neural network) 4. output file name
std::vector<std::string> kbest_files;
//for stoic gen, 1st neural network file, 2nd is output file name
std::vector<std::string> stoicgen_files;
//truncated softmax
std::vector<std::string> trunc_info;
//for decoding ratios
std::vector<precision> decoding_ratio;
//for continuing to train
std::vector<std::string> cont_train;
//basic format setup
namespace po = boost::program_options;
po::options_description desc("Options");
desc.add_options()
("help,h", "Run to get help on how to use the program")
("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\
". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\
" \nFORMAT (if sequence): <target file name> <neural network output name>")
("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE)\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\
"FORMAT: (if seq): <target file name> <neural network file name>")
("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\
"FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>")
("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\
"FORMAT: <neural network file name> <output file name>")
("stoch-gen-len",po::value<int>(¶ms.sg_length) ,"How many sentences to let stoch-gen run for\n"\
"FORMAT: <num sentences>\n"
"DEFAULT: 100")
("temperature",po::value<double>(¶ms.temperature) ,"What should the temperature be for the stoch generation"\
"FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\
"DEFAULT: 1")
("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model")
("learning-rate,l",po::value<precision>(¶ms.learning_rate),"Set the learning rate\n DEFAULT: 0.7")
("longest-sent,L",po::value<int>(¶ms.longest_sent),"Set the maximum sentence length for training.\n DEFAULT: 100")
("hiddenstate-size,H",po::value<int>(¶ms.LSTM_size),"Set hiddenstate size \n DEFAULT: 1000")
("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\
"FORMAT: <shortlist size> <sampled size>")
("source-vocab,v",po::value<int>(¶ms.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus")
("target-vocab,V",po::value<int>(¶ms.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus")
("shuffle",po::value<bool>(¶ms.shuffle),"true if you want to shuffle the train data\n DEFAULT: true")
("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\
"FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08")
("number-epochs,n",po::value<int>(¶ms.num_epochs),"Set number of epochs\n DEFAULT: 10")
("clip-gradients,c",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5")
("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\
" when the perplexity on your specified dev set decreases from the previous half epoch by some constant, so "\
" new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n"
"FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\
"FORMAT: (if sequence): <target dev file name>")
("adaptive-decrease-factor,A",po::value<precision>(¶ms.decrease_factor),"To be used with adaptive-halve-lr"\
" it\n DEFAULT: 0.5")
("fixed-halve-lr",po::value<int> (¶ms.epoch_to_start_halving),"Halve the learning rate"\
" after a certain epoch, every half epoch afterwards by a specific amount")
("minibatch-size,m",po::value<int>(¶ms.minibatch_size),"Set minibatch size\n DEFAULT: 128")
("screen-print-rate",po::value<int>(¶ms.screen_print_rate),"Set after how many minibatched you want to print training info to the screen\n DEFAULT: 5")
("HPC-output",po::value<std::string>(¶ms.HPC_output_file_name),"Use if you want to have the terminal output also be put to a" \
"file \n FORMAT: <file name>")
("best-model,B",po::value<std::string>(¶ms.best_model_file_name),"During train have the best model be written to a file\nFORMAT: <output file name>")
("kbest,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get k best paths in sequence to sequence model\n"\
"FORMAT: <how many paths> <source file name> <neural network file name> <output file name>")
("beam-size,b",po::value<int>(¶ms.beam_size),"Set beam size for kbest paths\n DEFAULT: 12")
("penalty,p",po::value<precision>(¶ms.penalty),"Set penalty for kbest decoding. The value entered"\
" will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0")
("print-score",po::value<bool>(¶ms.print_score),"Set if you want to print out the unnormalized log prob for each path "\
"FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false")
("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations\n"\
"This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\
" and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\
"DEFAULT: 0.5, 1.5")
("Dump-LSTM",po::value<std::string>(¶ms.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\
"The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\
" 4.c_t 5.output gate 6.h_t 7.probabilities");
po::variables_map vm;
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
//see if the user specified the help flag
if ( vm.count("help") ) {
std::cout << "\n------------------------------\n";
std::cout << "This is Barret Zoph's GPU RNN library\n"
<< "The flags for the command line interface are below\n"
<< "" << "\n";
std::cout << desc << "\n";
exit (EXIT_FAILURE);
}
//error checks to be sure only once of these options is set
if (vm.count("train") && vm.count("kbest")) {
std::cout << "ERROR: you cannot train and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("train") && vm.count("force-decode")) {
std::cout << "ERROR: you cannot train and force-decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("force-decode") && vm.count("kbest")) {
std::cout << "ERROR: you cannot force-decode and get kbest at the same time\n";
exit (EXIT_FAILURE);
}
if (!(vm.count("train") || vm.count("force-decode") || vm.count("kbest")||vm.count("stoch-gen") || vm.count("cont-train") )) {
std::cout << "ERROR: you must either train,continue training,get kbest,stoch generate data or force-decode\n";
exit (EXIT_FAILURE);
}
params.longest_sent+=4; //because it is really 4 less
if(vm.count("train") || vm.count("cont-train")) {
//some basic error checks to parameters
if(params.learning_rate<=0) {
std::cout << "ERROR: you cannot have a learning rate <=0\n";
exit (EXIT_FAILURE);
}
if(params.minibatch_size<=0) {
std::cout << "ERROR: you cannot have a minibatch of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.LSTM_size<=0) {
std::cout << "ERROR: you cannot have a hiddenstate of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.source_vocab_size<=0) {
if(params.source_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a source_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.target_vocab_size<=0) {
if(params.target_vocab_size!=-1) {
std::cout << "ERROR: you cannot have a target_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.norm_clip<=0) {
std::cout << "ERROR: you cannot have your norm clip <=0\n";
exit (EXIT_FAILURE);
}
if(params.num_epochs<=0) {
std::cout << "ERROR: you cannot have num_epochs <=0\n";
exit (EXIT_FAILURE);
}
if(vm.count("HPC-output")) {
params.HPC_output = true;
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.train_file_name = params.unique_dir+"/train.txt";
if(vm.count("cont-train")) {
//sequence model
if(vm.count("sequence")) {
if(cont_train.size()!=2) {
std::cout << cont_train.size() << "\n";
std::cout << "ERROR: two arguements to be supplied to the continue train flag\n"\
" 1. train data file name, 2. neural network file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.target_file_name = cont_train[0];
params.input_weight_file = cont_train[1];
params.output_weight_file = cont_train[1];
params.LM = true;
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size);
}
else {
if(cont_train.size()!=3) {
std::cout << "ERROR: three arguements to be supplied to the continue train flag\n"\
" 1. source train data file name 2. target train data file name 3. neural network file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = cont_train[0];
params.target_file_name = cont_train[1];
params.input_weight_file = cont_train[2];
params.output_weight_file = cont_train[2];
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size);
}
}
else {
//now create the necessary files
if(vm.count("sequence")) {
if(train_files.size()!=2) {
std::cout << "ERROR: two arguements to be supplied to the train flag"\
" 1. train data file name, 2. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = true;
params.target_file_name = train_files[0];
params.output_weight_file = train_files[1];
input_file_prep input_helper;
//this outputs the train.txt file along with the mappings and first line
bool success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size);
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
else {
//then sequence to sequence model
if(train_files.size()!=3) {
std::cout << train_files.size() <<"\n";
std::cout << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\
" 1. source train data file name\n 2. target train data file name \n3. neural network output name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = train_files[0];
params.target_file_name = train_files[1];
params.output_weight_file = train_files[2];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
bool success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size);
//clean up if error
if(!success) {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
}
if(vm.count("parameter-range")) {
if(lower_upper_range.size()!=2) {
std::cout << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
precision temp_lower = lower_upper_range[0];
precision temp_upper = lower_upper_range[1];
if(temp_lower >= temp_upper) {
std::cout << "ERROR: the lower parameter range cannot be greater than the upper range\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.lower_range = temp_lower;
params.upper_range = temp_upper;
}
if(vm.count("fixed-halve-lr")) {
params.google_learning_rate = true;
if(params.epoch_to_start_halving<=0) {
std::cout << "ERROR: cannot halve learning rate until 1st epoch \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
if(vm.count("adaptive-halve-lr")) {
params.learning_rate_schedule = true;
if(vm.count("sequence")) {
if(adaptive_learning_rate.size()!=1) {
std::cout << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_target_file_name = adaptive_learning_rate[0];
params.test_file_name = params.unique_dir + "/validation.txt";
input_file_prep input_helper;
input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size);
}
else {
if(adaptive_learning_rate.size()!=2) {
std::cout << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.dev_source_file_name = adaptive_learning_rate[0];
params.dev_target_file_name = adaptive_learning_rate[1];
params.test_file_name = params.unique_dir + "/validation.txt";
if(params.dev_source_file_name == params.dev_target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name,
params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size);
}
if(vm.count("best-model")) {
params.best_model = true;
}
}
if(vm.count("truncated-softmax")) {
params.shortlist_size = std::stoi(trunc_info[0]);
params.sampled_size = std::stoi(trunc_info[1]);
params.truncated_softmax = true;
if(params.shortlist_size + params.sampled_size > params.target_vocab_size) {
std::cout << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train= true;
params.decode=false;
params.test = false;
params.stochastic_generation = false;
return;
}
if(vm.count("kbest")) {
if (kbest_files.size()!=4) {
std::cout << "ERROR: 4 arguements must be entered for kbest, 1. number of best paths"\
" 2 input file name "
" 3. neural network file name (this is the output file you get after training the neural network)"\
" 4. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.decode_file_name = params.unique_dir+"/decoder_input.txt";
params.decoder_output_file = params.unique_dir+"/decoder_output.txt";
params.num_hypotheses =std::stoi(kbest_files[0]);
params.decode_tmp_file = kbest_files[1];
params.input_weight_file = kbest_files[2];
params.decoder_final_file = kbest_files[3];
input_file_prep input_helper;
// input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt",
// params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size);
input_helper.integerize_file_kbest(params.input_weight_file,params.decode_tmp_file,params.decode_file_name,
params.longest_sent,params.LSTM_size,params.target_vocab_size,params.source_vocab_size);
if(params.beam_size<=0) {
std::cout << "ERROR: beam size cannot be <=0\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(params.penalty<0) {
std::cout << "ERROR: penalty cannot be less than zero\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
if(vm.count("Dump-LSTM")) {
params.dump_LSTM=true;
}
if(vm.count("dec-ratio")) {
if(decoding_ratio.size()!=2) {
std::cout << "Decoding ratio size: " << decoding_ratio.size() << "\n";
std::cout << decoding_ratio[0] << "\n";
std::cout << "ERROR: only two inputs for decoding ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.min_decoding_ratio = decoding_ratio[0];
params.max_decoding_ratio = decoding_ratio[1];
if(params.min_decoding_ratio >= params.max_decoding_ratio) {
std::cout << "ERROR: min decoding ratio must be <= max_decoding_ratio\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
}
params.train = false;
params.decode = true;
params.test = false;
params.stochastic_generation = false;
params.LM = false;
return;
}
if(vm.count("force-decode")) {
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.test_file_name = params.unique_dir + "/validation.txt";
if(vm.count("sequence")) {
if(test_files.size()!=3) {
std::cout << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\
"2. neural network file name 3.output file name \n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.target_file_name = test_files[0];
params.input_weight_file = test_files[1];
params.output_force_decode = test_files[2];
params.LM = true;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name,
params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size);
}
else {
if(test_files.size()!=4) {
std::cout << "ERROR: force-decode takes four arguements: 1. source input file"\
" 2. target input file 3. neural network file name 4. output file name\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = test_files[0];
params.target_file_name = test_files[1];
params.input_weight_file = test_files[2];
params.output_force_decode = test_files[3];
if(params.source_file_name == params.target_file_name) {
std::cout << "ERROR: do not use the same file for source and target data\n";
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size);
}
params.train= false;
params.decode=false;
params.test = true;
params.minibatch_size=1;
params.stochastic_generation = false;
return;
}
if(vm.count("stoch-gen")) {
if(!vm.count("sequence")) {
std::cout << "ERROR: you can only do stoch-gen on the sequence model\n";
exit (EXIT_FAILURE);
}
if(stoicgen_files.size()!=2) {
std::cout << "ERROR: stoch-gen takes two inputs"\
" 1. neural network file name 2. output file name\n";
exit (EXIT_FAILURE);
}
boost::filesystem::path unique_path = boost::filesystem::unique_path();
std::cout << "Temp directory being created named: " << unique_path.string() << "\n";
boost::filesystem::create_directories(unique_path);
params.unique_dir = unique_path.string();
params.sg_output_file_temp = params.unique_dir + "/sg.txt";
params.input_weight_file = stoicgen_files[0];
params.sg_output_file = stoicgen_files[1];
std::ifstream weights_file;
std::vector<std::string> info;
std::string str;
std::string word;
weights_file.open(params.input_weight_file.c_str());
weights_file.seekg(0, std::ios::beg);
std::getline(weights_file, str); //info from first sentence
std::istringstream iss(str, std::istringstream::in);
while(iss >> word) {
info.push_back(word);
}
weights_file.close();
params.LSTM_size = std::stoi(info[1]);
params.target_vocab_size = std::stoi(info[2]);
params.LM = true;
params.train= false;
params.decode = false;
params.test = false;
params.minibatch_size = 1;
params.stochastic_generation = true;
return;
}
}
catch(po::error& e) {
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
//std::cerr << desc << std::endl;
exit (EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
//Timing stuff
std::chrono::time_point<std::chrono::system_clock> start_total,
end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding;
std::chrono::duration<double> elapsed_seconds;
start_total = std::chrono::system_clock::now();
//Initializing the model
global_params params; //Declare all of the global parameters
//create tmp directory if it does not exist already
// if( !(boost::filesystem::exists("tmp/"))) {
// std::cout << "Creating tmp directory for program\n";
// boost::filesystem::create_directory("tmp/");
// }
//file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information
//get the command line arguements
command_line_parse(params,argc,argv);
neuralMT_model<precision> model; //This is the model
params.printIntroMessage();
BZ_CUDA::lower = params.lower_range;
BZ_CUDA::upper = params.upper_range;
if(params.google_learning_rate && params.learning_rate_schedule) {
std::cout << "ERROR: do not select both the fixed learning rate schedule and the perplexity based scheduler";
std::cout << "I Guarantee this is not what you intended to do\n";
exit (EXIT_FAILURE);
}
if(!params.decode) {
model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size,
params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip,
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.LM);
}
if(params.load_model_train) {
std::string temp_swap_weights = model.input_weight_file;
model.input_weight_file = params.load_model_name;
model.load_weights();
model.input_weight_file = temp_swap_weights;
}
std::ofstream HPC_output;
if(params.HPC_output) {
HPC_output.open("HPC_OUTPUT.txt");
}
////////////////////////////////////Train the model//////////////////////////////////////
if(params.train) {
//info for averaging the speed
int curr_batch_num_SPEED = 0;
const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever
int total_words_batch_SPEED = 0;
double total_batch_time_SPEED = 0;
//File info for the training file
file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax,
params.shortlist_size,params.sampled_size); //Initialize the file information
//model.initFileInfo(&file_info);
params.half_way_count = params.train_total_words/2;
if(params.google_learning_rate) {
std::cout << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
if(params.HPC_output) {
HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
HPC_output.flush();
}
}
int current_epoch = 1;
std::cout << "Starting model training\n";
std::cout << "Starting epoch 1\n";
if(params.HPC_output) {
HPC_output << "Starting model training\n";
HPC_output << "Starting epoch 1\n";
HPC_output.flush();
}
//stuff for learning rate schedule
int total_words = 0;
precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate
bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs
double old_perplexity = 0;
model.train_perplexity = 0; //set the model perplexity to zero
while(current_epoch <= params.num_epochs) {
begin_minibatch = std::chrono::system_clock::now();
bool success = file_info.read_minibatch();
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
//std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n";
total_batch_time_SPEED+= elapsed_seconds.count();
begin_minibatch = std::chrono::system_clock::now();
//cudaProfilerStart();
model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output,
file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output,
file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source,
file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target,
file_info.current_source_length,file_info.current_target_length,
file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad,
file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices,
file_info.len_unique_words_trunc_softmax);
// cudaProfilerStop();
// return 0;
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
total_batch_time_SPEED+= elapsed_seconds.count();
total_words_batch_SPEED+=file_info.words_in_minibatch;
if(curr_batch_num_SPEED>=thres_batch_num_SPEED) {
std::cout << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
std::cout << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
std::cout << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
std::cout << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
if(params.HPC_output) {
HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
HPC_output.flush();
}
total_words_batch_SPEED = 0;
total_batch_time_SPEED = 0;
curr_batch_num_SPEED = 0;
}
curr_batch_num_SPEED++;
total_words += file_info.words_in_minibatch;
//stuff for google learning rate
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count &&
learning_rate_flag) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New Learning Rate: " << temp_learning_rate << "\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = false;
if(params.HPC_output) {
HPC_output << "New Learning Rate: " << temp_learning_rate << "\n";
HPC_output.flush();
}
}
//stuff for perplexity based learning schedule
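			//If the new dev-set perplexity is not better than the previous one by at least params.margin,
			//the learning rate is multiplied by params.decrease_factor (0.5 unless overridden with adaptive-decrease-factor)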
if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) {
learning_rate_flag = false;
double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
old_perplexity = new_perplexity;
}
if(!success) {
current_epoch+=1;
//stuff for google learning rate schedule
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) {
temp_learning_rate = temp_learning_rate/2;
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
}
double new_perplexity;
if(params.google_learning_rate || params.learning_rate_schedule) {
new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,false,params.test_total_words,params.HPC_output,false,"");
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule) {
std::cout << "Old dev set Perplexity: " << old_perplexity << "\n";
std::cout << "New dev set Perplexity: " << new_perplexity << "\n";
if(params.HPC_output) {
HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
HPC_output.flush();
}
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
if(params.HPC_output) {
HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
HPC_output.flush();
}
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
std::cout << "New learning rate:" << temp_learning_rate <<"\n\n";
}
//perplexity is better so output the best model file
if(params.best_model && params.best_model_perp > new_perplexity) {
std::cout << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
if(params.HPC_output) {
HPC_output << "Now outputting the new best model\n";
HPC_output.flush();
}
params.best_model_perp = new_perplexity;
}
learning_rate_flag = true;
old_perplexity = new_perplexity;
}
if(params.train_perplexity) {
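				//model.train_perplexity accumulates what is treated here as a summed log2 probability of the training data,
				//so the value printed below is perplexity = 2^(-sum / total_target_words)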
std::cout << "PData on train set:" << model.train_perplexity << "\n";
std::cout << "Total target words: " << file_info.total_target_words << "\n";
std::cout << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
if(params.HPC_output) {
HPC_output << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
HPC_output.flush();
}
model.train_perplexity = 0;
}
total_words=0;
if(current_epoch <= params.num_epochs) {
std::cout << "-----------------------------------" << std::endl;
std::cout << "Starting epoch " << current_epoch << std::endl;
std::cout << "-----------------------------------" << std::endl;
if(params.HPC_output) {
HPC_output << "-----------------------------------" << std::endl;
HPC_output << "Starting epoch " << current_epoch << std::endl;
HPC_output << "-----------------------------------" << std::endl;
HPC_output.flush();
}
}
}
cudaDeviceSynchronize();
}
//Now that training is done, dump the weights
cudaDeviceSynchronize();
model.dump_weights();
}
/////////////////////////////////Get perplexity on test set////////////////////////////////
if(params.test) {
model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,HPC_output,true,params.test_total_words,params.HPC_output,true,params.output_force_decode);
}
if(params.LM && params.stochastic_generation) {
model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature);
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false);
}
///////////////////////////////////////////decode the model////////////////////////////////////////////
if(params.decode) {
std::cout << "-----------------Starting Decoding----------------\n";
begin_decoding = std::chrono::system_clock::now();
model.beam_decoder(params.beam_size,params.decode_file_name,
params.input_weight_file,params.decode_num_lines_in_file,params.source_vocab_size,
params.target_vocab_size,params.longest_sent,params.LSTM_size,params.penalty,
params.decoder_output_file,params.min_decoding_ratio,params.max_decoding_ratio,params.softmax_scaled,
params.num_hypotheses,params.print_score,params.dump_LSTM,params.LSTM_dump_file);
end_decoding = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding;
std::cout << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n";
//now unintegerize the file
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.decoder_output_file,params.decoder_final_file,false,true);
}
//remove the temp directory created
if(params.unique_dir!="NULL") {
boost::filesystem::path temp_path(params.unique_dir);
boost::filesystem::remove_all(temp_path);
}
//Compute the final runtime
end_total = std::chrono::system_clock::now();
elapsed_seconds = end_total-start_total;
std::cout << "\n\n\n";
std::cout << "Total Program Runtime: " << elapsed_seconds.count()/60.0 << " minutes" << std::endl;
}
|
6e19a8a70799a7572d8984bd8f960ea1e9cbf65f.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
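// A typical update step reads every neighbor's velocity from dev_vel1 and writes the result to
// dev_vel2, so no thread ever reads a value that another thread is overwriting in the same step;
// the two buffers are then swapped (or copied) before the next step.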
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_particlePosReshuffle;
glm::vec3 *dev_particleVelReshuffle;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions in the simulation space.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
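  // With cellWidth = 2 * max(rule distances), every boid that can influence a given boid lies within
  // the 2x2x2 block of cells nearest to it, so the neighbor search only has to visit at most 8 cells.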
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
hipMalloc((void**)&dev_particlePosReshuffle, N * sizeof(glm::vec3));
hipMalloc((void**)&dev_particleVelReshuffle, N * sizeof(glm::vec3));
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for calls to the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO CUDA kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 newVelocity = vel[iSelf];
float3 center = make_float3(0.0f, 0.0f, 0.0f);
float3 separate = make_float3(0.0f, 0.0f, 0.0f);
float3 cohesion = make_float3(0.0f, 0.0f, 0.0f);
int neighborCount1 = 0;
int neighborCount3 = 0;
float3 thisBoidPos = make_float3(pos[iSelf].x, pos[iSelf].y, pos[iSelf].z);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
for (int i = 0; i < N; i++) {
if (i == iSelf) continue;
float distance = glm::length(pos[i] - pos[iSelf]);
float3 thatBoidPos = make_float3(pos[i].x, pos[i].y, pos[i].z);
if (distance < rule1Distance) {
center.x += thatBoidPos.x;
center.y += thatBoidPos.y;
center.z += thatBoidPos.z;
neighborCount1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separate.x -= thatBoidPos.x - thisBoidPos.x;
separate.y -= thatBoidPos.y - thisBoidPos.y;
separate.z -= thatBoidPos.z - thisBoidPos.z;
}
// Rule 3: boids try to match the speed of surrounding boids
if(distance < rule3Distance) {
cohesion.x += vel[i].x;
cohesion.y += vel[i].y;
cohesion.z += vel[i].z;
neighborCount3++;
}
}
glm::vec3 v1(0.0f, 0.0f, 0.0f);
glm::vec3 v2 = v1;
glm::vec3 v3 = v1;
if (neighborCount1 > 0) {
center.x /= neighborCount1;
center.y /= neighborCount1;
center.z /= neighborCount1;
v1.x = (center.x - thisBoidPos.x) * rule1Scale;
v1.y = (center.y - thisBoidPos.y) * rule1Scale;
v1.z = (center.z - thisBoidPos.z) * rule1Scale;
}
if (neighborCount3 > 0) {
v3.x = cohesion.x * rule3Scale;
v3.y = cohesion.y * rule3Scale;
v3.z = cohesion.z * rule3Scale;
v3 /= neighborCount3;
}
v2.x = separate.x * rule2Scale;
v2.y = separate.y * rule2Scale;
v2.z = separate.z * rule2Scale;
return v1 + v2 + v3 + vel[iSelf];
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` boids, compute a new velocity from the three flocking rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 newVelocity = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = glm::length(newVelocity);
if (speed > maxSpeed) {
newVelocity = (newVelocity / speed) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
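  // (Writing into vel1 here would race with other threads that are still reading vel1 for their
  // own neighbor sums; writing to the second buffer keeps this step order-independent.)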
vel2[index] = newVelocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
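// Since the 1D index above is x-major (consecutive x values map to consecutive memory locations),
// iterating neighbor cells with z outermost and x innermost touches contiguous cell indices,
// which is the order used by the neighbor-search kernels below.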
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
glm::vec3 gridPos = glm::floor((thisBoidPos - gridMin) * inverseCellWidth);
int gridIndex = gridIndex3Dto1D((int)gridPos.x, (int)gridPos.y, (int)gridPos.z, gridResolution);
gridIndices[index] = gridIndex;
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
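// (The start/end buffers are reset to -1 with this kernel each step, so any cell whose start index
// is still -1 after kernIdentifyCellStartEnd can be skipped as empty.)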
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int curGridIndex = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[curGridIndex] = index;
return;
}
if (index == N - 1) {
gridCellEndIndices[curGridIndex] = index;
}
int prevGridIndex = particleGridIndices[index - 1];
// If the current grid index not equals to the previous one,
// update both the start index of the current grid index
// and the end index of the previous grid index
if (curGridIndex != prevGridIndex) {
gridCellStartIndices[curGridIndex] = index;
gridCellEndIndices[prevGridIndex] = index - 1;
}
return;
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
float maxDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 maxDistanceCoord(maxDistance);
glm::vec3 maxGridPos = thisBoidPos + maxDistanceCoord;
glm::vec3 minGridPos = thisBoidPos - maxDistanceCoord;
  glm::vec3 maxGrid = glm::ceil((maxGridPos - gridMin) * inverseCellWidth);
  // Clamp to valid cell indices [0, gridResolution - 1] so the loops below stay in bounds
  maxGrid = glm::clamp(maxGrid, glm::vec3(0.0), glm::vec3(gridResolution - 1));
  glm::vec3 minGrid = glm::floor((minGridPos - gridMin) * inverseCellWidth);
  minGrid = glm::clamp(minGrid, glm::vec3(0.0), glm::vec3(gridResolution - 1));
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int neighborCount1 = 0;
int neighborCount3 = 0;
glm::vec3 center(0.0f, 0.0f, 0.0f);
glm::vec3 separate(0.0f, 0.0f, 0.0f);
glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
glm::vec3 thisBoidNewVel = vel1[index];
for (int k = minGrid.z; k <= maxGrid.z; k++) {
for (int j = minGrid.y; j <= maxGrid.y; j++) {
for (int i = minGrid.x; i <= maxGrid.x; i++) {
int neighGridX = i;
int neighGridY = j;
int neighGridZ = k;
int neighGridIndex = gridIndex3Dto1D(neighGridX, neighGridY, neighGridZ, gridResolution);
int startIndex = gridCellStartIndices[neighGridIndex];
int endIndex = gridCellEndIndices[neighGridIndex];
if (startIndex < 0 || startIndex >= N || endIndex < 0 || endIndex >= N) continue;
for (int idx = startIndex; idx <= endIndex; idx++) {
int boidIndex = particleArrayIndices[idx];
if (boidIndex == index) continue;
glm::vec3 thatBoidPos = pos[boidIndex];
glm::vec3 thatBoidVel = vel1[boidIndex];
float distance = glm::length(thisBoidPos - thatBoidPos);
if (distance < rule1Distance) {
center += thatBoidPos;
neighborCount1++;
}
if (distance < rule2Distance) {
separate -= (thatBoidPos - thisBoidPos);
}
if (distance < rule3Distance) {
cohesion += thatBoidVel;
neighborCount3++;
}
}
}
}
}
if (neighborCount1 > 0) {
center /= neighborCount1;
thisBoidNewVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCount3 > 0) {
thisBoidNewVel += cohesion * rule3Scale;
}
thisBoidNewVel += separate * rule2Scale;
float speed = glm::length(thisBoidNewVel);
if (speed > maxSpeed) {
thisBoidNewVel = thisBoidNewVel * maxSpeed / speed;
}
vel2[index] = thisBoidNewVel;
}
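// Gather kernel for TODO-2.3: writes each boid's attribute into grid-sorted order
// (shuffled[i] = attribute[indices[i]]) so that boids in the same cell are contiguous in memory.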
__global__ void kernReshuffle(int N, int* indices, glm::vec3* attribute, glm::vec3* shuffledAttribute) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  shuffledAttribute[index] = attribute[indices[index]];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
glm::vec3 gridPos = glm::floor((thisBoidPos - gridMin) * inverseCellWidth);
int gridIndex = gridIndex3Dto1D((int)gridPos.x, (int)gridPos.y, (int)gridPos.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int neighborCount1 = 0;
int neighborCount3 = 0;
glm::vec3 center(0.0f, 0.0f, 0.0f);
glm::vec3 separate(0.0f, 0.0f, 0.0f);
glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
glm::vec3 thisBoidNewVel = vel1[index];
for (int k = -1; k <= 1; k++) {
for (int j = -1; j <= 1; j++) {
for (int i = -1; i <= 1; i++) {
        int neighGridX = (int)gridPos.x + i;
        int neighGridY = (int)gridPos.y + j;
        int neighGridZ = (int)gridPos.z + k;
        // Skip neighbor cells that fall outside the grid
        if (neighGridX < 0 || neighGridX >= gridResolution ||
            neighGridY < 0 || neighGridY >= gridResolution ||
            neighGridZ < 0 || neighGridZ >= gridResolution) {
          continue;
        }
        int neighGridIndex = gridIndex3Dto1D(neighGridX, neighGridY, neighGridZ, gridResolution);
int startIndex = gridCellStartIndices[neighGridIndex];
int endIndex = gridCellEndIndices[neighGridIndex];
if (startIndex < 0 || startIndex >= N || endIndex < 0 || endIndex >= N) continue;
for (int idx = startIndex; idx <= endIndex; idx++) {
glm::vec3 thatBoidPos = pos[idx];
if (thatBoidPos == thisBoidPos) continue;
glm::vec3 thatBoidVel = vel1[idx];
float distance = glm::length(thisBoidPos - thatBoidPos);
if (distance < rule1Distance) {
center += thatBoidPos;
neighborCount1++;
}
if (distance < rule2Distance) {
separate -= (thatBoidPos - thisBoidPos);
}
if (distance < rule3Distance) {
cohesion += thatBoidVel;
neighborCount3++;
}
}
}
}
}
if (neighborCount1 > 0) {
center /= neighborCount1;
thisBoidNewVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCount3 > 0) {
thisBoidNewVel += cohesion * rule3Scale;
}
thisBoidNewVel += separate * rule2Scale;
float speed = glm::length(thisBoidNewVel);
if (speed > maxSpeed) {
thisBoidNewVel = thisBoidNewVel * maxSpeed / speed;
}
vel2[index] = thisBoidNewVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
}
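// A note on the ping-pong copy above: hipMemcpy device-to-device works, but a cheaper
// alternative (sketch only, not what this function does) is to swap the two device
// pointers on the host each step, e.g.
//   std::swap(dev_vel1, dev_vel2);
// which avoids moving numObjects * sizeof(glm::vec3) bytes every frame.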
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernComputeIndices) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine(" kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// Wrap device vectors in thrust iterators for use with thrust.
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset every grid cell (the grid can hold more cells than there are boids).
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0,
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos,
dev_vel1,
dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine(" kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset every grid cell (the grid can hold more cells than there are boids).
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernReshuffle << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_particlePosReshuffle);
checkCUDAErrorWithLine("failed at kernReshuffle Pos");
kernReshuffle << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_particleVelReshuffle);
checkCUDAErrorWithLine("failed at kernReshuffle Vel");
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particlePosReshuffle,
dev_particleVelReshuffle,
dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_particlePosReshuffle, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
hipMemcpy(dev_pos, dev_particlePosReshuffle, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
}
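// Why the reshuffle pays off: after kernReshuffle, the boid data consumed by
// kernUpdateVelNeighborSearchCoherent is already ordered by grid cell, so
// gridCellStartIndices/gridCellEndIndices index straight into the reshuffled pos/vel
// buffers and the extra dev_particleArrayIndices lookup from the scattered version is
// no longer needed inside the velocity kernel.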
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_particlePosReshuffle);
hipFree(dev_particleVelReshuffle);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
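// For reference, with the keys/values initialized above the post-sort printout should
// show the keys in non-decreasing order:
//   keys: 0 0 0 0 1 2 2 3 5 6
// and, because the sort is unstable, the values paired with equal keys may appear in
// any order (some permutation of {0, 2, 4, 7} for key 0 and of {5, 6} for key 2).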
| 6e19a8a70799a7572d8984bd8f960ea1e9cbf65f.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
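// One possible answer to the question above: the brute-force velocity kernel reads every
// other boid's velocity from vel1 while writing this boid's result to vel2. If threads
// wrote back into vel1, neighbors still being processed could observe a mix of old and
// new velocities and the outcome would depend on thread scheduling; reading from one
// buffer and writing to the other keeps each step well defined, and the two buffers are
// exchanged (ping-ponged) once per step.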
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_particlePosReshuffle;
glm::vec3 *dev_particleVelReshuffle;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
cudaMalloc((void**)&dev_particlePosReshuffle, N * sizeof(glm::vec3));
cudaMalloc((void**)&dev_particleVelReshuffle, N * sizeof(glm::vec3));
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 newVelocity = vel[iSelf];
float3 center = make_float3(0.0f, 0.0f, 0.0f);
float3 separate = make_float3(0.0f, 0.0f, 0.0f);
float3 cohesion = make_float3(0.0f, 0.0f, 0.0f);
int neighborCount1 = 0;
int neighborCount3 = 0;
float3 thisBoidPos = make_float3(pos[iSelf].x, pos[iSelf].y, pos[iSelf].z);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
for (int i = 0; i < N; i++) {
if (i == iSelf) continue;
float distance = glm::length(pos[i] - pos[iSelf]);
float3 thatBoidPos = make_float3(pos[i].x, pos[i].y, pos[i].z);
if (distance < rule1Distance) {
center.x += thatBoidPos.x;
center.y += thatBoidPos.y;
center.z += thatBoidPos.z;
neighborCount1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separate.x -= thatBoidPos.x - thisBoidPos.x;
separate.y -= thatBoidPos.y - thisBoidPos.y;
separate.z -= thatBoidPos.z - thisBoidPos.z;
}
// Rule 3: boids try to match the speed of surrounding boids
if(distance < rule3Distance) {
cohesion.x += vel[i].x;
cohesion.y += vel[i].y;
cohesion.z += vel[i].z;
neighborCount3++;
}
}
glm::vec3 v1(0.0f, 0.0f, 0.0f);
glm::vec3 v2 = v1;
glm::vec3 v3 = v1;
if (neighborCount1 > 0) {
center.x /= neighborCount1;
center.y /= neighborCount1;
center.z /= neighborCount1;
v1.x = (center.x - thisBoidPos.x) * rule1Scale;
v1.y = (center.y - thisBoidPos.y) * rule1Scale;
v1.z = (center.z - thisBoidPos.z) * rule1Scale;
}
if (neighborCount3 > 0) {
v3.x = cohesion.x * rule3Scale;
v3.y = cohesion.y * rule3Scale;
v3.z = cohesion.z * rule3Scale;
v3 /= neighborCount3;
}
v2.x = separate.x * rule2Scale;
v2.y = separate.y * rule2Scale;
v2.z = separate.z * rule2Scale;
return v1 + v2 + v3 + vel[iSelf];
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 newVelocity = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = glm::length(newVelocity);
if (speed > maxSpeed) {
newVelocity = (newVelocity / speed) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
vel2[index] = newVelocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
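// One reasonable answer to the LOOK-2.3 question: x is the fastest-varying coordinate in
// this mapping, so consecutive x values give consecutive cell indices (and, after the 2.3
// reshuffle, nearly contiguous boid data). Iterating
//   for (z) { for (y) { for (x) { ... } } }   // x innermost
// therefore walks memory mostly in increasing order, which is friendlier to the cache than
// making x the outermost loop.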
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
glm::vec3 gridPos = glm::floor((thisBoidPos - gridMin) * inverseCellWidth);
int gridIndex = gridIndex3Dto1D((int)gridPos.x, (int)gridPos.y, (int)gridPos.z, gridResolution);
gridIndices[index] = gridIndex;
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
int curGridIndex = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[curGridIndex] = index;
return;
}
if (index == N - 1) {
gridCellEndIndices[curGridIndex] = index;
}
int prevGridIndex = particleGridIndices[index - 1];
// If the current grid index differs from the previous one,
// record the start index of the current cell
// and the end index of the previous cell
if (curGridIndex != prevGridIndex) {
gridCellStartIndices[curGridIndex] = index;
gridCellEndIndices[prevGridIndex] = index - 1;
}
return;
}
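// Illustrative example (made-up values): with N = 6 and sorted
//   particleGridIndices = { 2, 2, 2, 7, 7, 9 }
// this kernel yields
//   gridCellStartIndices[2] = 0, gridCellEndIndices[2] = 2,
//   gridCellStartIndices[7] = 3, gridCellEndIndices[7] = 4,
//   gridCellStartIndices[9] = 5, gridCellEndIndices[9] = 5,
// while every other cell keeps the -1 written by kernResetIntBuffer, marking it empty.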
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
float maxDistance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 maxDistanceCoord(maxDistance);
glm::vec3 maxGridPos = thisBoidPos + maxDistanceCoord;
glm::vec3 minGridPos = thisBoidPos - maxDistanceCoord;
glm::vec3 maxGrid = glm::ceil((maxGridPos - gridMin) * inverseCellWidth);
maxGrid = glm::clamp(maxGrid, glm::vec3(0.0), glm::vec3(gridResolution));
glm::vec3 minGrid = glm::floor((minGridPos - gridMin) * inverseCellWidth);
minGrid = glm::clamp(minGrid, glm::vec3(0.0), glm::vec3(gridResolution));
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int neighborCount1 = 0;
int neighborCount3 = 0;
glm::vec3 center(0.0f, 0.0f, 0.0f);
glm::vec3 separate(0.0f, 0.0f, 0.0f);
glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
glm::vec3 thisBoidNewVel = vel1[index];
for (int k = minGrid.z; k <= maxGrid.z; k++) {
for (int j = minGrid.y; j <= maxGrid.y; j++) {
for (int i = minGrid.x; i <= maxGrid.x; i++) {
int neighGridX = i;
int neighGridY = j;
int neighGridZ = k;
int neighGridIndex = gridIndex3Dto1D(neighGridX, neighGridY, neighGridZ, gridResolution);
int startIndex = gridCellStartIndices[neighGridIndex];
int endIndex = gridCellEndIndices[neighGridIndex];
if (startIndex < 0 || startIndex >= N || endIndex < 0 || endIndex >= N) continue;
for (int idx = startIndex; idx <= endIndex; idx++) {
int boidIndex = particleArrayIndices[idx];
if (boidIndex == index) continue;
glm::vec3 thatBoidPos = pos[boidIndex];
glm::vec3 thatBoidVel = vel1[boidIndex];
float distance = glm::length(thisBoidPos - thatBoidPos);
if (distance < rule1Distance) {
center += thatBoidPos;
neighborCount1++;
}
if (distance < rule2Distance) {
separate -= (thatBoidPos - thisBoidPos);
}
if (distance < rule3Distance) {
cohesion += thatBoidVel;
neighborCount3++;
}
}
}
}
}
if (neighborCount1 > 0) {
center /= neighborCount1;
thisBoidNewVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCount3 > 0) {
thisBoidNewVel += cohesion * rule3Scale;
}
thisBoidNewVel += separate * rule2Scale;
float speed = glm::length(thisBoidNewVel);
if (speed > maxSpeed) {
thisBoidNewVel = thisBoidNewVel * maxSpeed / speed;
}
vel2[index] = thisBoidNewVel;
}
__global__ void kernReshuffle(int N, int* indices, glm::vec3* attribute, glm::vec3* shuffledAttribute) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
shuffledAttribute[index] = attribute[indices[index]];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisBoidPos = pos[index];
glm::vec3 gridPos = glm::floor((thisBoidPos - gridMin) * inverseCellWidth);
int gridIndex = gridIndex3Dto1D((int)gridPos.x, (int)gridPos.y, (int)gridPos.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int neighborCount1 = 0;
int neighborCount3 = 0;
glm::vec3 center(0.0f, 0.0f, 0.0f);
glm::vec3 separate(0.0f, 0.0f, 0.0f);
glm::vec3 cohesion(0.0f, 0.0f, 0.0f);
glm::vec3 thisBoidNewVel = vel1[index];
for (int k = -1; k <= 1; k++) {
for (int j = -1; j <= 1; j++) {
for (int i = -1; i <= 1; i++) {
int neighGridX = (int)gridPos.x + i;
int neighGridY = (int)gridPos.y + j;
int neighGridZ = (int)gridPos.z + k;
// Skip neighbor cells that fall outside the uniform grid so the cell index buffers are never read out of bounds.
if (neighGridX < 0 || neighGridX >= gridResolution ||
neighGridY < 0 || neighGridY >= gridResolution ||
neighGridZ < 0 || neighGridZ >= gridResolution) continue;
int neighGridIndex = gridIndex3Dto1D(neighGridX, neighGridY, neighGridZ, gridResolution);
int startIndex = gridCellStartIndices[neighGridIndex];
int endIndex = gridCellEndIndices[neighGridIndex];
if (startIndex < 0 || startIndex >= N || endIndex < 0 || endIndex >= N) continue;
for (int idx = startIndex; idx <= endIndex; idx++) {
// Skip this boid itself; pos/vel are cell-coherent here, so idx indexes it directly.
if (idx == index) continue;
glm::vec3 thatBoidPos = pos[idx];
glm::vec3 thatBoidVel = vel1[idx];
float distance = glm::length(thisBoidPos - thatBoidPos);
if (distance < rule1Distance) {
center += thatBoidPos;
neighborCount1++;
}
if (distance < rule2Distance) {
separate -= (thatBoidPos - thisBoidPos);
}
if (distance < rule3Distance) {
cohesion += thatBoidVel;
neighborCount3++;
}
}
}
}
}
if (neighborCount1 > 0) {
center /= neighborCount1;
thisBoidNewVel += (center - thisBoidPos) * rule1Scale;
}
if (neighborCount3 > 0) {
thisBoidNewVel += cohesion * rule3Scale;
}
thisBoidNewVel += separate * rule2Scale;
float speed = glm::length(thisBoidNewVel);
if (speed > maxSpeed) {
thisBoidNewVel = thisBoidNewVel * maxSpeed / speed;
}
vel2[index] = thisBoidNewVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce <<<fullBlocksPerGrid, threadsPerBlock >>> (numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices <<<fullBlocksPerGrid, threadsPerBlock>>> (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine(" kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// Wrap device vectors in thrust iterators for use with thrust.
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset every grid cell (the grid can hold more cells than there are boids).
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernIdentifyCellStartEnd <<<fullBlocksPerGrid, threadsPerBlock>>> (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered <<<fullBlocksPerGrid, threadsPerBlock >>> (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos,
dev_vel1,
dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine(" kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset every grid cell (the grid can hold more cells than there are boids).
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("failed at kernResetIntBuffer");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernReshuffle << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_particlePosReshuffle);
checkCUDAErrorWithLine("failed at kernReshuffle Pos");
kernReshuffle << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_particleVelReshuffle);
checkCUDAErrorWithLine("failed at kernReshuffle Vel");
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, threadsPerBlock >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particlePosReshuffle,
dev_particleVelReshuffle,
dev_vel2);
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_particlePosReshuffle, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_pos, dev_particlePosReshuffle, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_particlePosReshuffle);
cudaFree(dev_particleVelReshuffle);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
c4bff4d8871ed6753a89eddd423573d835efe9da.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeBackpropFilterOpKernel.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
template <class TFeat, class TOut, class TReal, class TIndex>
class ContinuousConvTransposeBackpropFilterOpKernelCUDA
: public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) {
texture_alignment =
open3d::core::GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const tensorflow::Tensor& out_features_gradient,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& filter_backprop) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
temp_size =
::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
}
private:
int texture_alignment;
};
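// The Kernel() above uses a common two-pass scratch-memory idiom: the first call to
// CConvTransposeBackpropFilterCUDA runs with temp_ptr == nullptr purely so the
// implementation can report temp_size / max_temp_size, a temp tensor of that size
// (clamped against this->max_temp_mem_MB) is then allocated through the TensorFlow
// context, and the second call does the actual work. Schematically (pseudocode, not an
// additional API):
//   op(stream, nullptr, temp_size, ...);   // 1) query required scratch size
//   temp = allocate(temp_size);            // 2) allocate_temp(...)
//   op(stream, temp, temp_size, ...);      // 3) execute with the real scratch buffer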
#define REG_KB(feattype, outtype, realtype, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTransposeBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<feattype>("TFeat") \
.TypeConstraint<outtype>("output_type") \
.TypeConstraint<realtype>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeBackpropFilterOpKernelCUDA< \
feattype, outtype, realtype, indextype>);
REG_KB(float, float, float, int32)
#undef REG_KB
| c4bff4d8871ed6753a89eddd423573d835efe9da.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeBackpropFilterOpKernel.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
template <class TFeat, class TOut, class TReal, class TIndex>
class ContinuousConvTransposeBackpropFilterOpKernelCUDA
: public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) {
texture_alignment =
open3d::core::GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const tensorflow::Tensor& out_features_gradient,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& filter_backprop) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
temp_size =
std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(feattype, outtype, realtype, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTransposeBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<feattype>("TFeat") \
.TypeConstraint<outtype>("output_type") \
.TypeConstraint<realtype>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeBackpropFilterOpKernelCUDA< \
feattype, outtype, realtype, indextype>);
REG_KB(float, float, float, int32)
#undef REG_KB
|
070c8e0afae97652318e7a31497d1f7b32f6e5eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel1_z_nonvector;
int xdim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim0_advec_mom_kernel1_z_nonvector;
int ydim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim1_advec_mom_kernel1_z_nonvector;
int xdim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim1_advec_mom_kernel1_z_nonvector;
int ydim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim2_advec_mom_kernel1_z_nonvector;
int xdim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim2_advec_mom_kernel1_z_nonvector;
int ydim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim3_advec_mom_kernel1_z_nonvector;
int xdim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim3_advec_mom_kernel1_z_nonvector;
int ydim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim4_advec_mom_kernel1_z_nonvector;
int xdim4_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim4_advec_mom_kernel1_z_nonvector;
int ydim4_advec_mom_kernel1_z_nonvector_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel1_z_nonvector * (y) + \
xdim0_advec_mom_kernel1_z_nonvector * ydim0_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel1_z_nonvector * (y) + \
xdim1_advec_mom_kernel1_z_nonvector * ydim1_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel1_z_nonvector * (y) + \
xdim2_advec_mom_kernel1_z_nonvector * ydim2_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel1_z_nonvector * (y) + \
xdim3_advec_mom_kernel1_z_nonvector * ydim3_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_mom_kernel1_z_nonvector * (y) + \
xdim4_advec_mom_kernel1_z_nonvector * ydim4_advec_mom_kernel1_z_nonvector * \
(z))
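// These OPS_ACCn macros flatten a 3D stencil offset into a 1D element index with x as the
// fastest-varying dimension. For example, assuming xdim0 = 10 and ydim0 = 8 (illustrative
// sizes only), OPS_ACC0(1, 2, 3) = 1 + 10*2 + 10*8*3 = 261 elements from the pointer's
// current base position.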
// user function
__device__
inline void
advec_mom_kernel1_z_nonvector(const double *node_flux,
const double *node_mass_pre, double *mom_flux,
const double *celldz, const double *vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
if ((node_flux[OPS_ACC0(0, 0, 0)]) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
} else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
sigma =
fabs(node_flux[OPS_ACC0(0, 0, 0)]) / node_mass_pre[OPS_ACC1(0, 0, donor)];
width = celldz[OPS_ACC3(0, 0, 0)];
vdiffuw = vel1[OPS_ACC4(0, 0, donor)] - vel1[OPS_ACC4(0, 0, upwind)];
vdiffdw = vel1[OPS_ACC4(0, 0, downwind)] - vel1[OPS_ACC4(0, 0, donor)];
limiter = 0.0;
if (vdiffuw * vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if (vdiffdw <= 0.0)
wind = -1.0;
limiter =
wind * MIN(width * ((2.0 - sigma) * adw / width +
(1.0 + sigma) * auw / celldz[OPS_ACC3(0, 0, dif)]) /
6.0,
MIN(auw, adw));
}
advec_vel_temp = vel1[OPS_ACC4(0, 0, donor)] + (1.0 - sigma) * limiter;
mom_flux[OPS_ACC2(0, 0, 0)] = advec_vel_temp * node_flux[OPS_ACC0(0, 0, 0)];
}
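// Summary of the user kernel above: the sign of node_flux selects the upwind, donor and
// downwind cells along z; sigma is the ratio of the transported node flux to the donor
// node mass; and a monotonic flux limiter (zero when vdiffuw and vdiffdw disagree in sign)
// blends the two velocity differences before forming the advected momentum flux
// mom_flux = advec_vel_temp * node_flux.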
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel1_z_nonvector(
const double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim0_advec_mom_kernel1_z_nonvector *
ydim0_advec_mom_kernel1_z_nonvector;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim1_advec_mom_kernel1_z_nonvector *
ydim1_advec_mom_kernel1_z_nonvector;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim2_advec_mom_kernel1_z_nonvector *
ydim2_advec_mom_kernel1_z_nonvector;
arg3 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim3_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim3_advec_mom_kernel1_z_nonvector *
ydim3_advec_mom_kernel1_z_nonvector;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim4_advec_mom_kernel1_z_nonvector *
ydim4_advec_mom_kernel1_z_nonvector;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel1_z_nonvector(arg0, arg1, arg2, arg3, arg4);
}
}
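// Note on the wrapper above: each thread advances arg0..arg4 by its own (idx_x, idx_y,
// idx_z) offset using the per-dataset xdim/ydim pitches, so the user kernel can address
// everything relative to (0, 0, 0). arg3 (celldz) is advanced with a 0 stride in x and y
// because that dataset only varies along z.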
// host stub function
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 5, range, 35))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(35, "advec_mom_kernel1_z_nonvector");
OPS_kernels[35].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel1_z_nonvector_h ||
ydim0 != ydim0_advec_mom_kernel1_z_nonvector_h ||
xdim1 != xdim1_advec_mom_kernel1_z_nonvector_h ||
ydim1 != ydim1_advec_mom_kernel1_z_nonvector_h ||
xdim2 != xdim2_advec_mom_kernel1_z_nonvector_h ||
ydim2 != ydim2_advec_mom_kernel1_z_nonvector_h ||
xdim3 != xdim3_advec_mom_kernel1_z_nonvector_h ||
ydim3 != ydim3_advec_mom_kernel1_z_nonvector_h ||
xdim4 != xdim4_advec_mom_kernel1_z_nonvector_h ||
ydim4 != ydim4_advec_mom_kernel1_z_nonvector_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel1_z_nonvector, &xdim0,
sizeof(int));
xdim0_advec_mom_kernel1_z_nonvector_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel1_z_nonvector, &ydim0,
sizeof(int));
ydim0_advec_mom_kernel1_z_nonvector_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel1_z_nonvector, &xdim1,
sizeof(int));
xdim1_advec_mom_kernel1_z_nonvector_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel1_z_nonvector, &ydim1,
sizeof(int));
ydim1_advec_mom_kernel1_z_nonvector_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_mom_kernel1_z_nonvector, &xdim2,
sizeof(int));
xdim2_advec_mom_kernel1_z_nonvector_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_mom_kernel1_z_nonvector, &ydim2,
sizeof(int));
ydim2_advec_mom_kernel1_z_nonvector_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_mom_kernel1_z_nonvector, &xdim3,
sizeof(int));
xdim3_advec_mom_kernel1_z_nonvector_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_mom_kernel1_z_nonvector, &ydim3,
sizeof(int));
ydim3_advec_mom_kernel1_z_nonvector_h = ydim3;
hipMemcpyToSymbol(xdim4_advec_mom_kernel1_z_nonvector, &xdim4,
sizeof(int));
xdim4_advec_mom_kernel1_z_nonvector_h = xdim4;
hipMemcpyToSymbol(ydim4_advec_mom_kernel1_z_nonvector, &ydim4,
sizeof(int));
ydim4_advec_mom_kernel1_z_nonvector_h = ydim4;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
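// Example of the launch shape (hypothetical values, since OPS_block_size_x/y are runtime
// options): with OPS_block_size_x = 32, OPS_block_size_y = 4 and a 100 x 80 x 60 range,
// grid = ((100-1)/32+1, (80-1)/4+1, 60) = (4, 20, 60) blocks of 32 x 4 x 1 threads, i.e.
// one thread per grid point in x/y and one block layer per z plane.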
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
char *p_a[5];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel1_z_nonvector), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[35].time += t1 - t2;
}
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
| 070c8e0afae97652318e7a31497d1f7b32f6e5eb.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel1_z_nonvector;
int xdim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim0_advec_mom_kernel1_z_nonvector;
int ydim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim1_advec_mom_kernel1_z_nonvector;
int xdim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim1_advec_mom_kernel1_z_nonvector;
int ydim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim2_advec_mom_kernel1_z_nonvector;
int xdim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim2_advec_mom_kernel1_z_nonvector;
int ydim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim3_advec_mom_kernel1_z_nonvector;
int xdim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim3_advec_mom_kernel1_z_nonvector;
int ydim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim4_advec_mom_kernel1_z_nonvector;
int xdim4_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim4_advec_mom_kernel1_z_nonvector;
int ydim4_advec_mom_kernel1_z_nonvector_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel1_z_nonvector * (y) + \
xdim0_advec_mom_kernel1_z_nonvector * ydim0_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel1_z_nonvector * (y) + \
xdim1_advec_mom_kernel1_z_nonvector * ydim1_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel1_z_nonvector * (y) + \
xdim2_advec_mom_kernel1_z_nonvector * ydim2_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel1_z_nonvector * (y) + \
xdim3_advec_mom_kernel1_z_nonvector * ydim3_advec_mom_kernel1_z_nonvector * \
(z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_mom_kernel1_z_nonvector * (y) + \
xdim4_advec_mom_kernel1_z_nonvector * ydim4_advec_mom_kernel1_z_nonvector * \
(z))
// user function
__device__
inline void
advec_mom_kernel1_z_nonvector(const double *node_flux,
const double *node_mass_pre, double *mom_flux,
const double *celldz, const double *vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
if ((node_flux[OPS_ACC0(0, 0, 0)]) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
} else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
sigma =
fabs(node_flux[OPS_ACC0(0, 0, 0)]) / node_mass_pre[OPS_ACC1(0, 0, donor)];
width = celldz[OPS_ACC3(0, 0, 0)];
vdiffuw = vel1[OPS_ACC4(0, 0, donor)] - vel1[OPS_ACC4(0, 0, upwind)];
vdiffdw = vel1[OPS_ACC4(0, 0, downwind)] - vel1[OPS_ACC4(0, 0, donor)];
limiter = 0.0;
if (vdiffuw * vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if (vdiffdw <= 0.0)
wind = -1.0;
limiter =
wind * MIN(width * ((2.0 - sigma) * adw / width +
(1.0 + sigma) * auw / celldz[OPS_ACC3(0, 0, dif)]) /
6.0,
MIN(auw, adw));
}
advec_vel_temp = vel1[OPS_ACC4(0, 0, donor)] + (1.0 - sigma) * limiter;
mom_flux[OPS_ACC2(0, 0, 0)] = advec_vel_temp * node_flux[OPS_ACC0(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel1_z_nonvector(
const double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
const double *__restrict arg4, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim0_advec_mom_kernel1_z_nonvector *
ydim0_advec_mom_kernel1_z_nonvector;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim1_advec_mom_kernel1_z_nonvector *
ydim1_advec_mom_kernel1_z_nonvector;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim2_advec_mom_kernel1_z_nonvector *
ydim2_advec_mom_kernel1_z_nonvector;
arg3 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim3_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim3_advec_mom_kernel1_z_nonvector *
ydim3_advec_mom_kernel1_z_nonvector;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel1_z_nonvector +
idx_z * 1 * 1 * xdim4_advec_mom_kernel1_z_nonvector *
ydim4_advec_mom_kernel1_z_nonvector;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel1_z_nonvector(arg0, arg1, arg2, arg3, arg4);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 5, range, 35))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(35, "advec_mom_kernel1_z_nonvector");
OPS_kernels[35].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel1_z_nonvector_h ||
ydim0 != ydim0_advec_mom_kernel1_z_nonvector_h ||
xdim1 != xdim1_advec_mom_kernel1_z_nonvector_h ||
ydim1 != ydim1_advec_mom_kernel1_z_nonvector_h ||
xdim2 != xdim2_advec_mom_kernel1_z_nonvector_h ||
ydim2 != ydim2_advec_mom_kernel1_z_nonvector_h ||
xdim3 != xdim3_advec_mom_kernel1_z_nonvector_h ||
ydim3 != ydim3_advec_mom_kernel1_z_nonvector_h ||
xdim4 != xdim4_advec_mom_kernel1_z_nonvector_h ||
ydim4 != ydim4_advec_mom_kernel1_z_nonvector_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel1_z_nonvector, &xdim0,
sizeof(int));
xdim0_advec_mom_kernel1_z_nonvector_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel1_z_nonvector, &ydim0,
sizeof(int));
ydim0_advec_mom_kernel1_z_nonvector_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel1_z_nonvector, &xdim1,
sizeof(int));
xdim1_advec_mom_kernel1_z_nonvector_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel1_z_nonvector, &ydim1,
sizeof(int));
ydim1_advec_mom_kernel1_z_nonvector_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_mom_kernel1_z_nonvector, &xdim2,
sizeof(int));
xdim2_advec_mom_kernel1_z_nonvector_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_mom_kernel1_z_nonvector, &ydim2,
sizeof(int));
ydim2_advec_mom_kernel1_z_nonvector_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_mom_kernel1_z_nonvector, &xdim3,
sizeof(int));
xdim3_advec_mom_kernel1_z_nonvector_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_mom_kernel1_z_nonvector, &ydim3,
sizeof(int));
ydim3_advec_mom_kernel1_z_nonvector_h = ydim3;
cudaMemcpyToSymbol(xdim4_advec_mom_kernel1_z_nonvector, &xdim4,
sizeof(int));
xdim4_advec_mom_kernel1_z_nonvector_h = xdim4;
cudaMemcpyToSymbol(ydim4_advec_mom_kernel1_z_nonvector, &ydim4,
sizeof(int));
ydim4_advec_mom_kernel1_z_nonvector_h = ydim4;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
char *p_a[5];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel1_z_nonvector<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[35].time += t1 - t2;
}
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
|
2561b9db5da373f10e38877cc4e98c58ce772aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
__global__ void testKernel(float*x, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < len) {
float sum = x[tid];
int iter = 0;
while(iter++ < len) {
sum += 1;
}
x[tid] = sum;
}
}
__global__ void subKernel (float*a, float*b, float*c, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < len) {
c[tid] = a[tid] - b[tid];
}
}
int main(int argc, char **argv)
{
const int streamsNum = 2;
int N=1<<10; // 1K
if (argc == 2) {
N = atoi(argv[1]);
}
if (argc > 2) {
fprintf(stderr, "Too many arguments! ./stream_sync N .\n");
exit(1);
}
std::cout << "Running " << N << " (floats) as the input data size." << std::endl;
std::cout << "Launching " << streamsNum << " cuda streams." << std::endl;
// host
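// page-locked (pinned) buffers so the hipMemcpyAsync calls below can run asynchronously with the streams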
float *h_a = NULL;
float *h_b = NULL;
float *h_c = NULL;
hipHostMalloc((void**)&h_a, sizeof(float) * N);
hipHostMalloc((void**)&h_b, sizeof(float) * N);
hipHostMalloc((void**)&h_c, sizeof(float) * N);
// init
for(int i=0; i<N; i++) {
h_a[i] = 0;
h_b[i] = 0;
}
// device
float*d_a = NULL;
float*d_b = NULL;
float*d_c = NULL;
hipMalloc((void**)&d_a, sizeof(float) * N);
hipMalloc((void**)&d_b, sizeof(float) * N);
hipMalloc((void**)&d_c, sizeof(float) * N);
// streams
hipStream_t streams[streamsNum];
hipEvent_t events[streamsNum]; // events for streams
for(int i=0; i<streamsNum; i++) {
hipStreamCreate(&streams[i]);
hipEventCreate(&events[i]);
}
// h2d
hipMemcpyAsync(d_a, h_a, sizeof(float)*N, hipMemcpyHostToDevice, streams[0]);
hipMemcpyAsync(d_b, h_b, sizeof(float)*N, hipMemcpyHostToDevice, streams[1]);
// kernel
dim3 block = dim3(128,1,1);
dim3 grid = dim3((N + block.x - 1) / block.x,1,1);
hipLaunchKernelGGL(( testKernel) , dim3(grid), dim3(block), 0, streams[0] , d_a, N); // a + x
hipEventRecord(events[0], streams[0]);
hipLaunchKernelGGL(( testKernel) , dim3(grid), dim3(block), 0, streams[1] , d_b, N); // b + x
hipEventRecord(events[1], streams[1]);
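// block the host until both kernels have completed, so subKernel (on stream 0) only launches once d_a and d_b are ready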
hipEventSynchronize(events[0]);
hipEventSynchronize(events[1]);
hipLaunchKernelGGL(( subKernel) , dim3(grid), dim3(block), 0, streams[0] , d_a, d_b, d_c, N); // a - b
// d2h
hipMemcpyAsync(h_c, d_c, sizeof(float)*N, hipMemcpyDeviceToHost, streams[0]);
hipDeviceSynchronize(); // NOTE: this is needed to make sure the preceding device operations (kernels and the async copy) are done before the host checks h_c!
int error_c = 0;
for(int i=0; i<N; i++) {
if(h_c[i] > 1e-8) { // h_c should be 0
printf("h_c[%d] = %f\n",i, h_c[i]);
error_c += 1;
}
}
if(error_c == 0) {
printf("Pass test on h_c!\n");
}
// free
for(int i=0; i<streamsNum; i++) {
hipStreamDestroy(streams[i]);
hipEventDestroy(events[i]);
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
return 0;
}
| 2561b9db5da373f10e38877cc4e98c58ce772aa3.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda_runtime.h>
__global__ void testKernel(float*x, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < len) {
float sum = x[tid];
int iter = 0;
while(iter++ < len) {
sum += 1;
}
x[tid] = sum;
}
}
__global__ void subKernel (float*a, float*b, float*c, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < len) {
c[tid] = a[tid] - b[tid];
}
}
int main(int argc, char **argv)
{
const int streamsNum = 2;
int N=1<<10; // 1K
if (argc == 2) {
N = atoi(argv[1]);
}
if (argc > 2) {
fprintf(stderr, "Too many arguments! ./stream_sync N .\n");
exit(1);
}
std::cout << "Running " << N << " (floats) as the input data size." << std::endl;
std::cout << "Launching " << streamsNum << " cuda streams." << std::endl;
// host
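// page-locked (pinned) buffers so the cudaMemcpyAsync calls below can run asynchronously with the streams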
float *h_a = NULL;
float *h_b = NULL;
float *h_c = NULL;
cudaMallocHost((void**)&h_a, sizeof(float) * N);
cudaMallocHost((void**)&h_b, sizeof(float) * N);
cudaMallocHost((void**)&h_c, sizeof(float) * N);
// init
for(int i=0; i<N; i++) {
h_a[i] = 0;
h_b[i] = 0;
}
// device
float*d_a = NULL;
float*d_b = NULL;
float*d_c = NULL;
cudaMalloc((void**)&d_a, sizeof(float) * N);
cudaMalloc((void**)&d_b, sizeof(float) * N);
cudaMalloc((void**)&d_c, sizeof(float) * N);
// streams
cudaStream_t streams[streamsNum];
cudaEvent_t events[streamsNum]; // events for streams
for(int i=0; i<streamsNum; i++) {
cudaStreamCreate(&streams[i]);
cudaEventCreate(&events[i]);
}
// h2d
cudaMemcpyAsync(d_a, h_a, sizeof(float)*N, cudaMemcpyHostToDevice, streams[0]);
cudaMemcpyAsync(d_b, h_b, sizeof(float)*N, cudaMemcpyHostToDevice, streams[1]);
// kernel
dim3 block = dim3(128,1,1);
dim3 grid = dim3((N + block.x - 1) / block.x,1,1);
testKernel <<< grid, block, 0, streams[0] >>> (d_a, N); // a + x
cudaEventRecord(events[0], streams[0]);
testKernel <<< grid, block, 0, streams[1] >>> (d_b, N); // b + x
cudaEventRecord(events[1], streams[1]);
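// block the host until both kernels have completed, so subKernel (on stream 0) only launches once d_a and d_b are ready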
cudaEventSynchronize(events[0]);
cudaEventSynchronize(events[1]);
subKernel <<< grid, block, 0, streams[0] >>> (d_a, d_b, d_c, N); // a - b
// d2h
cudaMemcpyAsync(h_c, d_c, sizeof(float)*N, cudaMemcpyDeviceToHost, streams[0]);
cudaDeviceSynchronize(); // NOTE: this is needed to make sure the preceding device operations (kernels and the async copy) are done before the host checks h_c!
int error_c = 0;
for(int i=0; i<N; i++) {
if(h_c[i] > 1e-8) { // h_c should be 0
printf("h_c[%d] = %f\n",i, h_c[i]);
error_c += 1;
}
}
if(error_c == 0) {
printf("Pass test on h_c!\n");
}
// free
for(int i=0; i<streamsNum; i++) {
cudaStreamDestroy(streams[i]);
cudaEventDestroy(events[i]);
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
|
1d34fc876d161d067a1c99c3064d371e27fb24aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <limits>
#define _USE_MATH_DEFINES
#include <math.h>
#include "inttypes.h"
#include "cudaErrorHadling.h"
float reduce(float *data, int32_t n);
float reduce1(float *data, int32_t n);
void initForPi(float *data, int32_t n);
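// Formats a byte count as a human-readable string (B/KB/MB/GB/TB).
// Note: the returned pointer refers to a static buffer, so each call overwrites the previous result.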
const char * const printMemorySize(size_t bytes){
char inches[] = {' ', 'K', 'M', 'G', 'T'};
double sz = bytes;
int inch = 0;
for (; sz > 512 && inch < 5; ++inch){
sz /= 1024;
}
static char ret[64];
sprintf(ret, "%.2f %cB", sz, inches[inch]);
return ret;
}
float timer(){
static clock_t timer = 0;
if (!timer){
timer = clock();
return 0;
}
clock_t current = clock();
float ret = ((float)(current - timer))/CLOCKS_PER_SEC;
timer = current;
return ret;
}
bool ourRequirementsPassed(const hipDeviceProp_t & devProp){
return devProp.major >= 1;
}
int selectCUDADevice(){
int deviceCount = 0, suitableDevice = -1;
hipDeviceProp_t devProp;
hipGetDeviceCount( &deviceCount );
std::cout << "Found "<< deviceCount << " devices: \n";
for (int device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties ( &devProp, device );
std::cout << "Device: " << device << std::endl;
std::cout << " Compute capability: " << devProp.major << "." << devProp.minor << std::endl;
std::cout << " Name: " << devProp.name << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Total Global Memory: " << printMemorySize(devProp.totalGlobalMem) << std::endl;
std::cout << " Shared Memory Per Block: " << printMemorySize(devProp.sharedMemPerBlock) << std::endl;
std::cout << " Total Const Memory: " << printMemorySize(devProp.totalConstMem) << std::endl;
std::cout << " L2 Cache size: " << printMemorySize(devProp.l2CacheSize) << std::endl;
std::cout << " Memory bus width: " << printMemorySize(devProp.memoryBusWidth/8) << std::endl;
std::cout << " Memory frequency: " << devProp.memoryClockRate << " kHz" << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Multiprocessors: " << devProp.multiProcessorCount << std::endl;
std::cout << " Clock rate: " << devProp.clockRate << " kHz" << std::endl;
std::cout << " Warp Size: " << devProp.warpSize << std::endl;
std::cout << " Max grid size: " << "(" << devProp.maxGridSize[0] << ", " << devProp.maxGridSize[1] << ", " << devProp.maxGridSize[2] << ")" << std::endl;
std::cout << " Max block size: " << "(" << devProp.maxThreadsDim[0] << ", " << devProp.maxThreadsDim[1] << ", " << devProp.maxThreadsDim[2] << ")" << std::endl;
std::cout << " Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << " Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
std::cout << " Registers per block: " << devProp.regsPerBlock << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << std::endl;
if(suitableDevice < 0 && ourRequirementsPassed(devProp)){
suitableDevice = device;
}
}
return suitableDevice;
}
void initializeRandomArray(float *array, int length){
for(int i =0; i < length; ++i){
array[i] = ((float)(rand()%10))/length;
}
}
int main(int argc, char *argv[]){
//------------- Variables -----------
int n = 1024*1024*8;
hipEvent_t start, stop;
float timeGPU1 = 0.0, timeGPU2 = 0.0, timeGPU3 = 0.0;
size_t nb = n*sizeof(float);
float *aDev = NULL;
float sumDevice = 0.0;
//-----------------------------------
//--------- Command line -----------
if(argc > 1){
int tmp = atoi(argv[1]);
if (tmp > 1){
n = atoi(argv[1]);
}
}
//----------------------------------
//-------- Select device -----------
int device = selectCUDADevice();
if(device == -1) {
std::cout << "Can not find suitable device" << "\n";
return EXIT_FAILURE;
}
SAFE_CALL(hipSetDevice(device));
//-----------------------------------
//----- GPU memory allocation and initialization -------
SAFE_CALL( hipMalloc((void**)&aDev, nb) );
initForPi(aDev, n);
//------------------------------------------------------
//------ Create CUDA events ----------------------------
SAFE_CALL( hipEventCreate(&start) );
SAFE_CALL( hipEventCreate(&stop) );
//------------------------------------------------------
//------ Calculation on GPU first way --------------
SAFE_CALL( hipEventRecord(start, 0) );
float sum1 = reduce1(aDev, n);
SAFE_CALL( hipEventRecord(stop, 0) );
SAFE_CALL( hipEventSynchronize(stop) );
SAFE_CALL( hipEventElapsedTime(&timeGPU1, start, stop) );
//--------------------------------------
//------ Calculation on GPU second way --------------
SAFE_CALL( hipEventRecord(start, 0) );
float sum2 = reduce(aDev, n);
SAFE_CALL( hipEventRecord(stop, 0) );
SAFE_CALL( hipEventSynchronize(stop) );
SAFE_CALL( hipEventElapsedTime(&timeGPU2, start, stop) );
//--------------------------------------
double pi1 = sum1/n;
double pi2 = sum2/n;
printf("Pi1 = %e\tpi - Pi1 = %e\n", pi1, M_PI-pi1);
printf("Pi2 = %e\tpi - Pi2 = %e\n", pi2, M_PI-pi2);
printf("1. Processing time on GPU: %4.8f s\n", timeGPU1/1000.0);
printf("2. Processing time on GPU: %4.8f s\n", timeGPU2/1000.0);
return EXIT_SUCCESS;
}
| 1d34fc876d161d067a1c99c3064d371e27fb24aa.cu | #include <iostream>
#include <cuda_runtime.h>
#include <time.h>
#include <limits>
#define _USE_MATH_DEFINES
#include <math.h>
#include "inttypes.h"
#include "cudaErrorHadling.h"
float reduce(float *data, int32_t n);
float reduce1(float *data, int32_t n);
void initForPi(float *data, int32_t n);
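// Formats a byte count as a human-readable string (B/KB/MB/GB/TB).
// Note: the returned pointer refers to a static buffer, so each call overwrites the previous result.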
const char * const printMemorySize(size_t bytes){
char inches[] = {' ', 'K', 'M', 'G', 'T'};
double sz = bytes;
int inch = 0;
for (; sz > 512 && inch < 5; ++inch){
sz /= 1024;
}
static char ret[64];
sprintf(ret, "%.2f %cB", sz, inches[inch]);
return ret;
}
float timer(){
static clock_t timer = 0;
if (!timer){
timer = clock();
return 0;
}
clock_t current = clock();
float ret = ((float)(current - timer))/CLOCKS_PER_SEC;
timer = current;
return ret;
}
bool ourRequirementsPassed(const cudaDeviceProp & devProp){
return devProp.major >= 1;
}
int selectCUDADevice(){
int deviceCount = 0, suitableDevice = -1;
cudaDeviceProp devProp;
cudaGetDeviceCount( &deviceCount );
std::cout << "Found "<< deviceCount << " devices: \n";
for (int device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties ( &devProp, device );
std::cout << "Device: " << device << std::endl;
std::cout << " Compute capability: " << devProp.major << "." << devProp.minor << std::endl;
std::cout << " Name: " << devProp.name << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Total Global Memory: " << printMemorySize(devProp.totalGlobalMem) << std::endl;
std::cout << " Shared Memory Per Block: " << printMemorySize(devProp.sharedMemPerBlock) << std::endl;
std::cout << " Total Const Memory: " << printMemorySize(devProp.totalConstMem) << std::endl;
std::cout << " L2 Cache size: " << printMemorySize(devProp.l2CacheSize) << std::endl;
std::cout << " Memory bus width: " << printMemorySize(devProp.memoryBusWidth/8) << std::endl;
std::cout << " Memory frequency: " << devProp.memoryClockRate << " kHz" << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << " Multiprocessors: " << devProp.multiProcessorCount << std::endl;
std::cout << " Clock rate: " << devProp.clockRate << " kHz" << std::endl;
std::cout << " Warp Size: " << devProp.warpSize << std::endl;
std::cout << " Max grid size: " << "(" << devProp.maxGridSize[0] << ", " << devProp.maxGridSize[1] << ", " << devProp.maxGridSize[2] << ")" << std::endl;
std::cout << " Max block size: " << "(" << devProp.maxThreadsDim[0] << ", " << devProp.maxThreadsDim[1] << ", " << devProp.maxThreadsDim[2] << ")" << std::endl;
std::cout << " Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << " Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
std::cout << " Registers per block: " << devProp.regsPerBlock << std::endl;
std::cout << "----------------------------------------" << std::endl;
std::cout << std::endl;
if(suitableDevice < 0 && ourRequirementsPassed(devProp)){
suitableDevice = device;
}
}
return suitableDevice;
}
void initializeRandomArray(float *array, int length){
for(int i =0; i < length; ++i){
array[i] = ((float)(rand()%10))/length;
}
}
int main(int argc, char *argv[]){
//------------- Variables -----------
int n = 1024*1024*8;
cudaEvent_t start, stop;
float timeGPU1 = 0.0, timeGPU2 = 0.0, timeGPU3 = 0.0;
size_t nb = n*sizeof(float);
float *aDev = NULL;
float sumDevice = 0.0;
//-----------------------------------
//--------- Command line -----------
if(argc > 1){
int tmp = atoi(argv[1]);
if (tmp > 1){
n = atoi(argv[1]);
}
}
//----------------------------------
//-------- Select device -----------
int device = selectCUDADevice();
if(device == -1) {
std::cout << "Can not find suitable device" << "\n";
return EXIT_FAILURE;
}
SAFE_CALL(cudaSetDevice(device));
//-----------------------------------
//----- GPU memory allocation and initialization -------
SAFE_CALL( cudaMalloc((void**)&aDev, nb) );
initForPi(aDev, n);
//------------------------------------------------------
//------ Create CUDA events ----------------------------
SAFE_CALL( cudaEventCreate(&start) );
SAFE_CALL( cudaEventCreate(&stop) );
//------------------------------------------------------
//------ Calculation on GPU first way --------------
SAFE_CALL( cudaEventRecord(start, 0) );
float sum1 = reduce1(aDev, n);
SAFE_CALL( cudaEventRecord(stop, 0) );
SAFE_CALL( cudaEventSynchronize(stop) );
SAFE_CALL( cudaEventElapsedTime(&timeGPU1, start, stop) );
//--------------------------------------
//------ Calculation on GPU second way --------------
SAFE_CALL( cudaEventRecord(start, 0) );
float sum2 = reduce(aDev, n);
SAFE_CALL( cudaEventRecord(stop, 0) );
SAFE_CALL( cudaEventSynchronize(stop) );
SAFE_CALL( cudaEventElapsedTime(&timeGPU2, start, stop) );
//--------------------------------------
double pi1 = sum1/n;
double pi2 = sum2/n;
printf("Pi1 = %e\tpi - Pi1 = %e\n", pi1, M_PI-pi1);
printf("Pi2 = %e\tpi - Pi2 = %e\n", pi2, M_PI-pi2);
printf("1. Processing time on GPU: %4.8f s\n", timeGPU1/1000.0);
printf("2. Processing time on GPU: %4.8f s\n", timeGPU2/1000.0);
return EXIT_SUCCESS;
}
|
448379697c741896c63ff303045386aeb509f3c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_initialise_chunk_kernel_zero_x [1][1];
static int dims_initialise_chunk_kernel_zero_x_h [1][1] = {0};
//user function
__device__
void initialise_chunk_kernel_zero_x_gpu(ACC<double> &var) {
var(0,0) = 0.0;
}
__global__ void ops_initialise_chunk_kernel_zero_x(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_zero_x[0][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_initialise_chunk_kernel_zero_x[0][0], arg0);
initialise_chunk_kernel_zero_x_gpu(argp0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_initialise_chunk_kernel_zero_x_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,6)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(6,"initialise_chunk_kernel_zero_x");
OPS_kernels[6].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 1,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
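// copy the dat dimensions into constant memory only when they changed since the previous launch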
if (xdim0 != dims_initialise_chunk_kernel_zero_x_h[0][0]) {
dims_initialise_chunk_kernel_zero_x_h[0][0] = xdim0;
cutilSafeCall(hipMemcpyToSymbol( dims_initialise_chunk_kernel_zero_x, dims_initialise_chunk_kernel_zero_x_h, sizeof(dims_initialise_chunk_kernel_zero_x)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[6].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zero_x), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0],x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[6].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[6].mpi_time += t2-t1;
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 6;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 6;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_zero_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(6,"initialise_chunk_kernel_zero_x");
}
ops_enqueue_kernel(desc);
}
#endif
| 448379697c741896c63ff303045386aeb509f3c7.cu | //
// auto-generated by ops.py
//
__constant__ int dims_initialise_chunk_kernel_zero_x [1][1];
static int dims_initialise_chunk_kernel_zero_x_h [1][1] = {0};
//user function
__device__
void initialise_chunk_kernel_zero_x_gpu(ACC<double> &var) {
var(0,0) = 0.0;
}
__global__ void ops_initialise_chunk_kernel_zero_x(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_zero_x[0][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_initialise_chunk_kernel_zero_x[0][0], arg0);
initialise_chunk_kernel_zero_x_gpu(argp0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_initialise_chunk_kernel_zero_x_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,6)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(6,"initialise_chunk_kernel_zero_x");
OPS_kernels[6].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 1,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
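// copy the dat dimensions into constant memory only when they changed since the previous launch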
if (xdim0 != dims_initialise_chunk_kernel_zero_x_h[0][0]) {
dims_initialise_chunk_kernel_zero_x_h[0][0] = xdim0;
cutilSafeCall(cudaMemcpyToSymbol( dims_initialise_chunk_kernel_zero_x, dims_initialise_chunk_kernel_zero_x_h, sizeof(dims_initialise_chunk_kernel_zero_x)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[6].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_initialise_chunk_kernel_zero_x<<<grid, tblock >>> ( (double *)p_a[0],x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[6].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[6].mpi_time += t2-t1;
OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 6;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 6;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_zero_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(6,"initialise_chunk_kernel_zero_x");
}
ops_enqueue_kernel(desc);
}
#endif
|
fd4f870864a44513116057f8090997ac19ef29c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lagrange_cuda.cuh"
__host__ __device__ float bilinear(float* Q, Point2D pt1, Point2D pt2, Point2D pt){
if (fabsf(pt1.x - pt.x) < ZERO_THRESH || fabsf(pt2.x - pt.x) < ZERO_THRESH){
return (pt2.y + pt1.y)/2.f;
}
else if (fabsf(pt1.y - pt.y) < ZERO_THRESH || fabsf(pt2.y - pt.y) < ZERO_THRESH){
return (pt2.x + pt1.x)/2.f;
}
else {
float first_term = 1 / ((pt2.x - pt1.x) * (pt2.y - pt1.y));
return first_term * (Q[0] * (pt1.x - pt.x) * (pt2.y - pt.y) +
Q[1] * (pt.x - pt1.x) * (pt2.y - pt.y) +
Q[2] * (pt2.x - pt.x) * (pt.y - pt1.y) +
Q[3] * (pt.x - pt1.x) * (pt.y - pt1.y));
}
}
__host__ __device__ float trilinear(float* Q, Point3D pt1, Point3D pt2, Point3D pt){
/**
* Q - the 8 corner values, stored as two 2x2 slices:
* Q[0..3] on the z = pt1.z face and Q[4..7] on the z = pt2.z face.
* Interpolates bilinearly within each slice, then linearly between them in z.
**/
Point2D bpt1 = {pt1.x, pt1.y};
Point2D bpt2 = {pt2.x, pt2.y};
Point2D bpt = {pt.x, pt.y};
float* Q1 = &Q[0];
float* Q2 = &Q[4];
if (pt1.z - pt.z == 0){
return bilinear(Q1, bpt1, bpt2, bpt);
}
else if (pt2.z - pt.z == 0){
return bilinear(Q2, bpt1, bpt2, bpt);
}
else if (pt2.z == pt1.z){
return bilinear(Q1, bpt1, bpt2, bpt);
}
else {
float zd = (pt.z - pt1.z) / (pt2.z - pt1.z);
float b1 = bilinear(Q1, bpt1, bpt2, bpt);
float b2 = bilinear(Q2, bpt1, bpt2, bpt);
return b1 * (1.f - zd) + b2 * zd;
}
}
__host__ __device__ void get_Q(float* Q, float* M, Point3D_int low, Point3D_int high){
// printf("%d ",low.z * (NX * NY) + low.y * NY + low.x );
Q[0] = M[low.z * (NX * NY) + low.y * NY + low.x ];
Q[1] = M[low.z * (NX * NY) + low.y * NY + high.x];
Q[2] = M[low.z * (NX * NY) + high.y * NY + low.x ];
Q[3] = M[low.z * (NX * NY) + high.y * NY + high.x];
Q[4] = M[high.z * (NX * NY) + low.y * NY + low.x ];
Q[5] = M[high.z * (NX * NY) + low.y * NY + high.x];
Q[6] = M[high.z * (NX * NY) + high.y * NY + low.x ];
Q[7] = M[high.z * (NX * NY) + high.y * NY + high.x];
}
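// Computes one RK4 increment at pt: maps the position to grid indices, gathers the 8 surrounding
// corner values of U/V/W into Q_u/Q_v/Q_w, trilinearly interpolates the velocity there and scales it by DT.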
__host__ __device__ Point3D get_next_rk4_k(Point3D pt, float* x_axis, float* y_axis, float* z_axis,
float* U, float* V, float* W, float* Q_u, float* Q_v, float* Q_w){
Point3D k = {
pt.x / (float) RESOLUTION,
pt.y / (float) RESOLUTION,
pt.z / (float) RESOLUTION
};
Point3D_int k_high = {
(int) ceilf(k.x),
(int) ceilf(k.y),
(int) ceilf(k.z)
};
Point3D_int k_low = {
(int) floorf(k.x),
(int) floorf(k.y),
(int) floorf(k.z)
};
// printf("%f %f %f\n", pt.x, pt.y, pt.z);
// printf("%d %d %d\n", k_high.x, k_high.y, k_high.z);
get_Q(Q_u, U, k_low, k_high);
get_Q(Q_v, V, k_low, k_high);
get_Q(Q_w, W, k_low, k_high);
Point3D low = {
x_axis[k_low.x],
y_axis[k_low.y],
z_axis[k_low.z]
};
Point3D high = {
x_axis[k_high.x],
y_axis[k_high.y],
z_axis[k_high.z]
};
Point3D d_dt = {
trilinear(Q_u, low, high, pt),
trilinear(Q_v, low, high, pt),
trilinear(Q_w, low, high, pt)
};
Point3D rk4_k = {
(float) DT * d_dt.x,
(float) DT * d_dt.y,
(float) DT * d_dt.z
};
return rk4_k;
}
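// Advances a particle by one RK4 step through the interpolated velocity field.
// Any intermediate position that leaves the domain or becomes NaN aborts the step and returns a NaN point.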
__host__ __device__ Point3D lagrange3_step(Point3D pt, float* x_axis, float* y_axis, float* z_axis, float* U, float* V,
float* W, float* Q_u, float* Q_v, float* Q_w){
Point3D nan_point = {NAN, NAN, NAN};
Point3D k1 = get_next_rk4_k(pt, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
Point3D tmp = {
pt.x + k1.x/2.f,
pt.y + k1.y/2.f,
pt.z + k1.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
// printf("from step %f %f %f\n", tmp.x, tmp.y, tmp.z);
Point3D k2 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
tmp = (Point3D){
pt.x + k2.x/2.f,
pt.y + k2.y/2.f,
pt.z + k2.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
Point3D k3 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
tmp = (Point3D){
pt.x + k3.x/2.f,
pt.y + k3.y/2.f,
pt.z + k3.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
Point3D k4 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
Point3D result = {
pt.x + 1/6.f * (k1.x + 2*k2.x + 2*k3.x + k4.x),
pt.y + 1/6.f * (k1.y + 2*k2.y + 2*k3.y + k4.y),
pt.z + 1/6.f * (k1.z + 2*k2.z + 2*k3.z + k4.z)
};
return result;
}
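// Grid-stride loop over the particles: each live particle is advanced one step;
// particles that leave the domain (or go NaN) are marked dead by setting their x coordinate to -1.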
__global__ void streamlines(int t, float* x_axis, float* y_axis, float* z_axis,
float* U, float* V, float* W,
float* Q_u, float* Q_v, float* Q_w,
Point3D* pt_sav){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = tid;
while (stride < N_PARTICLES){
if (pt_sav[stride].x < 0){
stride += gridDim.x * blockDim.x;
continue;
}
Point3D pt_i = pt_sav[stride];
Point3D lagrange = lagrange3_step(pt_i, x_axis, y_axis, z_axis, U, V, W, &Q_u[8*tid], &Q_v[8*tid], &Q_w[8*tid]);
if (lagrange.x >= MAXVALX || lagrange.y >= MAXVALY || lagrange.z >= MAXVALZ ||
lagrange.x <= MINVAL || lagrange.y <= MINVAL || lagrange.z <= MINVAL ||
isnan(lagrange.x) || isnan(lagrange.y) || isnan(lagrange.z)){
pt_sav[stride].x = -1;
stride += gridDim.x * blockDim.x;
continue;
}
pt_sav[stride] = lagrange;
stride += gridDim.x * blockDim.x;
}
}
__global__ void setup_axes(float* x_axis, float* y_axis, float* z_axis){
// assign x axis values
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = tid;
while (stride < (int)(NX)){
float val = ((float) stride) * RESOLUTION;
x_axis[stride] = val;
y_axis[stride] = val;
stride += gridDim.x * blockDim.x;
}
stride = tid;
while (stride < (int)(NZ)){
float val = ((float) stride) * RESOLUTION;
z_axis[stride] = val;
stride += gridDim.x * blockDim.x;
}
}
__global__ void setup_pt_lagrange(Point3D* pt_lagrange, int npoints){
// initial points.
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
int tidz = threadIdx.z + blockIdx.z * blockDim.z;
int tid = tidz * blockDim.x * gridDim.x * blockDim.y * gridDim.y + tidy * blockDim.y * gridDim.y + tidx;
int stride = tid;
while (stride < npoints / 8){
pt_lagrange[stride].x = (float) tidx * RESOLUTION * 8;
pt_lagrange[stride].y = (float) tidy * RESOLUTION * 8;
pt_lagrange[stride].z = (float) tidz * RESOLUTION * 8;
stride += blockDim.x * blockDim.y * blockDim.z * gridDim.x * gridDim.y * gridDim.z;
}
}
//__global__ void setup_sav(Point3D* pt_sav, Point3D* pt_lagrange) {
// int tid = threadIdx.x + blockIdx.x * blockDim.x;
// int stride = tid;
// while (stride < N_PARTICLES) {
// pt_sav[stride] = pt_lagrange[stride];
// stride += gridDim.x * blockDim.x;
//
// }
//}
//
//
//void streamlines_driver(int t, float* x_axis, float* y_axis, float* z_axis,
// float* U, float* V, float* W,
// float* Q_u, float* Q_v, float* Q_w, Point3D* pt_sav){
// Point3D nan_point = {NAN, NAN, NAN};
//
//}
//
| fd4f870864a44513116057f8090997ac19ef29c0.cu | #include "lagrange_cuda.cuh"
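// Bilinear interpolation of the four corner values Q[0..3] over the rectangle spanned by pt1 and pt2,
// evaluated at pt. If pt lies on a cell edge (within ZERO_THRESH) the code falls back to the midpoint
// of the opposite coordinate instead of interpolating.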
__host__ __device__ float bilinear(float* Q, Point2D pt1, Point2D pt2, Point2D pt){
if (fabsf(pt1.x - pt.x) < ZERO_THRESH || fabsf(pt2.x - pt.x) < ZERO_THRESH){
return (pt2.y + pt1.y)/2.f;
}
else if (fabsf(pt1.y - pt.y) < ZERO_THRESH || fabsf(pt2.y - pt.y) < ZERO_THRESH){
return (pt2.x + pt1.x)/2.f;
}
else {
float first_term = 1 / ((pt2.x - pt1.x) * (pt2.y - pt1.y));
return first_term * (Q[0] * (pt1.x - pt.x) * (pt2.y - pt.y) +
Q[1] * (pt.x - pt1.x) * (pt2.y - pt.y) +
Q[2] * (pt2.x - pt.x) * (pt.y - pt1.y) +
Q[3] * (pt.x - pt1.x) * (pt.y - pt1.y));
}
}
__host__ __device__ float trilinear(float* Q, Point3D pt1, Point3D pt2, Point3D pt){
/**
* Q - the 8 corner values, stored as two 2x2 slices:
* Q[0..3] on the z = pt1.z face and Q[4..7] on the z = pt2.z face.
* Interpolates bilinearly within each slice, then linearly between them in z.
**/
Point2D bpt1 = {pt1.x, pt1.y};
Point2D bpt2 = {pt2.x, pt2.y};
Point2D bpt = {pt.x, pt.y};
float* Q1 = &Q[0];
float* Q2 = &Q[4];
if (pt1.z - pt.z == 0){
return bilinear(Q1, bpt1, bpt2, bpt);
}
else if (pt2.z - pt.z == 0){
return bilinear(Q2, bpt1, bpt2, bpt);
}
else if (pt2.z == pt1.z){
return bilinear(Q1, bpt1, bpt2, bpt);
}
else {
float zd = (pt.z - pt1.z) / (pt2.z - pt1.z);
float b1 = bilinear(Q1, bpt1, bpt2, bpt);
float b2 = bilinear(Q2, bpt1, bpt2, bpt);
return b1 * (1.f - zd) + b2 * zd;
}
}
__host__ __device__ void get_Q(float* Q, float* M, Point3D_int low, Point3D_int high){
// printf("%d ",low.z * (NX * NY) + low.y * NY + low.x );
Q[0] = M[low.z * (NX * NY) + low.y * NY + low.x ];
Q[1] = M[low.z * (NX * NY) + low.y * NY + high.x];
Q[2] = M[low.z * (NX * NY) + high.y * NY + low.x ];
Q[3] = M[low.z * (NX * NY) + high.y * NY + high.x];
Q[4] = M[high.z * (NX * NY) + low.y * NY + low.x ];
Q[5] = M[high.z * (NX * NY) + low.y * NY + high.x];
Q[6] = M[high.z * (NX * NY) + high.y * NY + low.x ];
Q[7] = M[high.z * (NX * NY) + high.y * NY + high.x];
}
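// Computes one RK4 increment at pt: maps the position to grid indices, gathers the 8 surrounding
// corner values of U/V/W into Q_u/Q_v/Q_w, trilinearly interpolates the velocity there and scales it by DT.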
__host__ __device__ Point3D get_next_rk4_k(Point3D pt, float* x_axis, float* y_axis, float* z_axis,
float* U, float* V, float* W, float* Q_u, float* Q_v, float* Q_w){
Point3D k = {
pt.x / (float) RESOLUTION,
pt.y / (float) RESOLUTION,
pt.z / (float) RESOLUTION
};
Point3D_int k_high = {
(int) ceilf(k.x),
(int) ceilf(k.y),
(int) ceilf(k.z)
};
Point3D_int k_low = {
(int) floorf(k.x),
(int) floorf(k.y),
(int) floorf(k.z)
};
// printf("%f %f %f\n", pt.x, pt.y, pt.z);
// printf("%d %d %d\n", k_high.x, k_high.y, k_high.z);
get_Q(Q_u, U, k_low, k_high);
get_Q(Q_v, V, k_low, k_high);
get_Q(Q_w, W, k_low, k_high);
Point3D low = {
x_axis[k_low.x],
y_axis[k_low.y],
z_axis[k_low.z]
};
Point3D high = {
x_axis[k_high.x],
y_axis[k_high.y],
z_axis[k_high.z]
};
Point3D d_dt = {
trilinear(Q_u, low, high, pt),
trilinear(Q_v, low, high, pt),
trilinear(Q_w, low, high, pt)
};
Point3D rk4_k = {
(float) DT * d_dt.x,
(float) DT * d_dt.y,
(float) DT * d_dt.z
};
return rk4_k;
}
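// Advances a particle by one RK4 step through the interpolated velocity field.
// Any intermediate position that leaves the domain or becomes NaN aborts the step and returns a NaN point.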
__host__ __device__ Point3D lagrange3_step(Point3D pt, float* x_axis, float* y_axis, float* z_axis, float* U, float* V,
float* W, float* Q_u, float* Q_v, float* Q_w){
Point3D nan_point = {NAN, NAN, NAN};
Point3D k1 = get_next_rk4_k(pt, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
Point3D tmp = {
pt.x + k1.x/2.f,
pt.y + k1.y/2.f,
pt.z + k1.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
// printf("from step %f %f %f\n", tmp.x, tmp.y, tmp.z);
Point3D k2 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
tmp = (Point3D){
pt.x + k2.x/2.f,
pt.y + k2.y/2.f,
pt.z + k2.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
Point3D k3 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
tmp = (Point3D){
pt.x + k3.x/2.f,
pt.y + k3.y/2.f,
pt.z + k3.z/2.f
};
if (tmp.x < (float) MINVAL || tmp.y < (float) MINVAL || tmp.z < (float) MINVAL ||
tmp.x > (float) (MAXVALX-RESOLUTION) || tmp.y > (float) (MAXVALY-RESOLUTION) || tmp.z > (float) (MAXVALZ-RESOLUTION)
|| tmp.x != tmp.x || tmp.y != tmp.y || tmp.z != tmp.z ){
return nan_point;
}
Point3D k4 = get_next_rk4_k(tmp, x_axis, y_axis, z_axis, U, V, W, Q_u, Q_v, Q_w);
Point3D result = {
pt.x + 1/6.f * (k1.x + 2*k2.x + 2*k3.x + k4.x),
pt.y + 1/6.f * (k1.y + 2*k2.y + 2*k3.y + k4.y),
pt.z + 1/6.f * (k1.z + 2*k2.z + 2*k3.z + k4.z)
};
return result;
}
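// Grid-stride loop over the particles: each live particle is advanced one step;
// particles that leave the domain (or go NaN) are marked dead by setting their x coordinate to -1.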
__global__ void streamlines(int t, float* x_axis, float* y_axis, float* z_axis,
float* U, float* V, float* W,
float* Q_u, float* Q_v, float* Q_w,
Point3D* pt_sav){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = tid;
while (stride < N_PARTICLES){
if (pt_sav[stride].x < 0){
stride += gridDim.x * blockDim.x;
continue;
}
Point3D pt_i = pt_sav[stride];
Point3D lagrange = lagrange3_step(pt_i, x_axis, y_axis, z_axis, U, V, W, &Q_u[8*tid], &Q_v[8*tid], &Q_w[8*tid]);
if (lagrange.x >= MAXVALX || lagrange.y >= MAXVALY || lagrange.z >= MAXVALZ ||
lagrange.x <= MINVAL || lagrange.y <= MINVAL || lagrange.z <= MINVAL ||
isnan(lagrange.x) || isnan(lagrange.y) || isnan(lagrange.z)){
pt_sav[stride].x = -1;
stride += gridDim.x * blockDim.x;
continue;
}
pt_sav[stride] = lagrange;
stride += gridDim.x * blockDim.x;
}
}
__global__ void setup_axes(float* x_axis, float* y_axis, float* z_axis){
// assign x axis values
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = tid;
while (stride < (int)(NX)){
float val = ((float) stride) * RESOLUTION;
x_axis[stride] = val;
y_axis[stride] = val;
stride += gridDim.x * blockDim.x;
}
stride = tid;
while (stride < (int)(NZ)){
float val = ((float) stride) * RESOLUTION;
z_axis[stride] = val;
stride += gridDim.x * blockDim.x;
}
}
__global__ void setup_pt_lagrange(Point3D* pt_lagrange, int npoints){
// initial points.
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
int tidz = threadIdx.z + blockIdx.z * blockDim.z;
int tid = tidz * blockDim.x * gridDim.x * blockDim.y * gridDim.y + tidy * blockDim.y * gridDim.y + tidx;
int stride = tid;
while (stride < npoints / 8){
pt_lagrange[stride].x = (float) tidx * RESOLUTION * 8;
pt_lagrange[stride].y = (float) tidy * RESOLUTION * 8;
pt_lagrange[stride].z = (float) tidz * RESOLUTION * 8;
stride += blockDim.x * blockDim.y * blockDim.z * gridDim.x * gridDim.y * gridDim.z;
}
}
//__global__ void setup_sav(Point3D* pt_sav, Point3D* pt_lagrange) {
// int tid = threadIdx.x + blockIdx.x * blockDim.x;
// int stride = tid;
// while (stride < N_PARTICLES) {
// pt_sav[stride] = pt_lagrange[stride];
// stride += gridDim.x * blockDim.x;
//
// }
//}
//
//
//void streamlines_driver(int t, float* x_axis, float* y_axis, float* z_axis,
// float* U, float* V, float* W,
// float* Q_u, float* Q_v, float* Q_w, Point3D* pt_sav){
// Point3D nan_point = {NAN, NAN, NAN};
//
//}
//
|
43da49988a99a6f6fcd2580f340dcb54b76283e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "../inc/mgpu_header.h"
#include "../inc/sufsort_util.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
template<typename T>
void check_correctness(T *keys1, T *keys2, T *values1, T *values2, uint32 size)
{
printf("checking correctness...\n");
int wrong = 0;
for (uint32 i = 0; i < size-1; i++)
if (keys1[i] > keys1[i+1] || keys1[i] != keys2[i] || values1[i] != values2[i])
wrong++;
// for (uint32 i = 0; i < size; i++)
// printf("%u %u, %u %u\n", keys1[i], keys2[i], values1[i], values2[i]);
if (!wrong)
printf("status: passed\n");
else
{
printf("status: failed\n");
printf("number of wrong positions: %u\n", wrong);
}
}
template<typename T>
void generate_data(T *keys, T *values, uint32 size)
{
printf("generating data...\n");
srand(time(NULL));
for (uint32 i = 0; i < size; i++)
{
keys[i] = rand()%2097152;
values[i] = rand();
// keys[i] = i%4;
// values[i] = i%4;
}
}
int main(int argc, char ** argv)
{
uint32 size = 20479920;
uint32 round_size = 20480000;
ContextPtr context;
sortEngine_t engine;
MgpuSortData data;
init_mgpu_engine(context, engine, 0);
uint32 *h_keys_thrust = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_values_thrust = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_keys_mgpu = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_values_mgpu = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *d_keys_mgpu = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_values_mgpu = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_keys_thrust = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_values_thrust = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
generate_data<uint32>(h_keys_thrust, h_values_thrust, size);
mem_host2device(h_keys_thrust, d_keys_thrust, sizeof(uint32)*size);
mem_host2device(h_keys_thrust, d_keys_mgpu, sizeof(uint32)*size);
mem_host2device(h_values_thrust, d_values_thrust, sizeof(uint32)*size);
mem_host2device(h_values_thrust, d_values_mgpu, sizeof(uint32)*size);
alloc_mgpu_data(engine, data, size);
mgpu_sort(engine, data, d_keys_mgpu, d_values_mgpu, size, 26);
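// sort an identical copy with Thrust as the reference and compare the two results afterwards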
thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast(d_keys_thrust);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_values_thrust);
thrust::stable_sort_by_key(d_key_ptr, d_key_ptr+size, d_value_ptr);
HANDLE_ERROR(hipDeviceSynchronize());
mem_device2host(d_keys_thrust, h_keys_thrust, sizeof(uint32)*size);
mem_device2host(d_keys_mgpu, h_keys_mgpu, sizeof(uint32)*size);
mem_device2host(d_values_thrust, h_values_thrust, sizeof(uint32)*size);
mem_device2host(d_values_mgpu, h_values_mgpu, sizeof(uint32)*size);
HANDLE_ERROR(hipDeviceSynchronize());
check_correctness<uint32>(h_keys_thrust, h_keys_mgpu, h_values_thrust, h_values_mgpu, size);
free_pageable_memory(h_keys_thrust);
free_pageable_memory(h_values_thrust);
free_pageable_memory(h_keys_mgpu);
free_pageable_memory(h_values_mgpu);
free_device_memory(d_keys_thrust);
free_device_memory(d_values_thrust);
free_device_memory(d_keys_mgpu);
free_device_memory(d_values_mgpu);
release_mgpu_engine(engine, data);
return 0;
}
| 43da49988a99a6f6fcd2580f340dcb54b76283e8.cu | #include "../inc/mgpu_header.h"
#include "../inc/sufsort_util.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
template<typename T>
void check_correctness(T *keys1, T *keys2, T *values1, T *values2, uint32 size)
{
printf("checking correctness...\n");
int wrong = 0;
for (uint32 i = 0; i < size-1; i++)
if (keys1[i] > keys1[i+1] || keys1[i] != keys2[i] || values1[i] != values2[i])
wrong++;
// for (uint32 i = 0; i < size; i++)
// printf("%u %u, %u %u\n", keys1[i], keys2[i], values1[i], values2[i]);
if (!wrong)
printf("status: passed\n");
else
{
printf("status: failed\n");
printf("number of wrong positions: %u\n", wrong);
}
}
template<typename T>
void generate_data(T *keys, T *values, uint32 size)
{
printf("generating data...\n");
srand(time(NULL));
for (uint32 i = 0; i < size; i++)
{
keys[i] = rand()%2097152;
values[i] = rand();
// keys[i] = i%4;
// values[i] = i%4;
}
}
int main(int argc, char ** argv)
{
uint32 size = 20479920;
uint32 round_size = 20480000;
ContextPtr context;
sortEngine_t engine;
MgpuSortData data;
init_mgpu_engine(context, engine, 0);
uint32 *h_keys_thrust = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_values_thrust = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_keys_mgpu = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *h_values_mgpu = (uint32*)allocate_pageable_memory(sizeof(uint32)*round_size);
uint32 *d_keys_mgpu = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_values_mgpu = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_keys_thrust = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
uint32 *d_values_thrust = (uint32*)allocate_device_memory(sizeof(uint32)*round_size);
generate_data<uint32>(h_keys_thrust, h_values_thrust, size);
mem_host2device(h_keys_thrust, d_keys_thrust, sizeof(uint32)*size);
mem_host2device(h_keys_thrust, d_keys_mgpu, sizeof(uint32)*size);
mem_host2device(h_values_thrust, d_values_thrust, sizeof(uint32)*size);
mem_host2device(h_values_thrust, d_values_mgpu, sizeof(uint32)*size);
alloc_mgpu_data(engine, data, size);
mgpu_sort(engine, data, d_keys_mgpu, d_values_mgpu, size, 26);
thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast(d_keys_thrust);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_values_thrust);
thrust::stable_sort_by_key(d_key_ptr, d_key_ptr+size, d_value_ptr);
HANDLE_ERROR(cudaDeviceSynchronize());
mem_device2host(d_keys_thrust, h_keys_thrust, sizeof(uint32)*size);
mem_device2host(d_keys_mgpu, h_keys_mgpu, sizeof(uint32)*size);
mem_device2host(d_values_thrust, h_values_thrust, sizeof(uint32)*size);
mem_device2host(d_values_mgpu, h_values_mgpu, sizeof(uint32)*size);
HANDLE_ERROR(cudaDeviceSynchronize());
check_correctness<uint32>(h_keys_thrust, h_keys_mgpu, h_values_thrust, h_values_mgpu, size);
free_pageable_memory(h_keys_thrust);
free_pageable_memory(h_values_thrust);
free_pageable_memory(h_keys_mgpu);
free_pageable_memory(h_values_mgpu);
free_device_memory(d_keys_thrust);
free_device_memory(d_values_thrust);
free_device_memory(d_keys_mgpu);
free_device_memory(d_values_mgpu);
release_mgpu_engine(engine, data);
return 0;
}
|
8f1f1d96ce329753431a00e0d0038a4c07d2c799.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/gaussian_kernel.h"
__global__
void GaussianWeightsKernel(const int n, const float sigma, const int window_size, const int num_channels, float * w)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
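// Decode the flat index i into (x, y, input_channel, output_channel) for a weight tensor laid out as [output_channel][input_channel][y][x].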
int indexTemp = i;
const int x = indexTemp % window_size;
indexTemp /= window_size;
const int y = indexTemp % window_size;
indexTemp /= window_size;
const int input_channel = indexTemp % num_channels;
indexTemp /= num_channels;
const int output_channel = indexTemp;
if (output_channel == input_channel) {
float half_window_size = (float)window_size / 2.0f;
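// Isotropic 2-D Gaussian with 1/(2*pi*sigma^2) normalisation; note that for sigma == 0 the diagonal weights are left unwritten.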
if (sigma != 0) {
float xx = (x - half_window_size) * (x - half_window_size);
float yy = (y - half_window_size) * (y - half_window_size);
float ss = sigma * sigma;
w[i] = 1.0f / (2.0f * 3.141592f * ss) * exp(-0.5f * (xx + yy) / ss);
}
}
else {
w[i] = 0;
}
}
}
ConvGaussianKernel::ConvGaussianKernel(deepflow::NodeParam * param) : Node(param)
{
LOG_IF(FATAL, param->has_gaussian_kernel_param() == false) << "param.has_gaussian_kernel_param() == false";
}
void ConvGaussianKernel::setSigma(float value)
{
if (value != _current_sigma) {
_current_sigma = value;
_param->mutable_gaussian_kernel_param()->set_sigma(_current_sigma);
generate();
}
}
void ConvGaussianKernel::init()
{
auto gparam = _param->gaussian_kernel_param();
_window_size = gparam.window_size();
_current_sigma = gparam.sigma();
if (gparam.num_channels())
_num_channels = gparam.num_channels();
std::array<int, 4> dims = { _num_channels, _num_channels, _window_size, _window_size};
_outputs[0]->initValue(dims);
generate();
}
void ConvGaussianKernel::forward()
{
}
void ConvGaussianKernel::backward()
{
}
std::string ConvGaussianKernel::to_cpp() const
{
return std::string();
}
void ConvGaussianKernel::generate()
{
auto size = _outputs[0]->value()->size();
GaussianWeightsKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _current_sigma, _window_size, _num_channels, (float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
| 8f1f1d96ce329753431a00e0d0038a4c07d2c799.cu | #include "nodes/gaussian_kernel.h"
__global__
void GaussianWeightsKernel(const int n, const float sigma, const int window_size, const int num_channels, float * w)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
int indexTemp = i;
const int x = indexTemp % window_size;
indexTemp /= window_size;
const int y = indexTemp % window_size;
indexTemp /= window_size;
const int input_channel = indexTemp % num_channels;
indexTemp /= num_channels;
const int output_channel = indexTemp;
if (output_channel == input_channel) {
float half_window_size = (float)window_size / 2.0f;
if (sigma != 0) {
float xx = (x - half_window_size) * (x - half_window_size);
float yy = (y - half_window_size) * (y - half_window_size);
float ss = sigma * sigma;
w[i] = 1.0f / (2.0f * 3.141592f * ss) * exp(-0.5f * (xx + yy) / ss);
}
}
else {
w[i] = 0;
}
}
}
ConvGaussianKernel::ConvGaussianKernel(deepflow::NodeParam * param) : Node(param)
{
LOG_IF(FATAL, param->has_gaussian_kernel_param() == false) << "param.has_gaussian_kernel_param() == false";
}
void ConvGaussianKernel::setSigma(float value)
{
if (value != _current_sigma) {
_current_sigma = value;
_param->mutable_gaussian_kernel_param()->set_sigma(_current_sigma);
generate();
}
}
void ConvGaussianKernel::init()
{
auto gparam = _param->gaussian_kernel_param();
_window_size = gparam.window_size();
_current_sigma = gparam.sigma();
if (gparam.num_channels())
_num_channels = gparam.num_channels();
std::array<int, 4> dims = { _num_channels, _num_channels, _window_size, _window_size};
_outputs[0]->initValue(dims);
generate();
}
void ConvGaussianKernel::forward()
{
}
void ConvGaussianKernel::backward()
{
}
std::string ConvGaussianKernel::to_cpp() const
{
return std::string();
}
void ConvGaussianKernel::generate()
{
auto size = _outputs[0]->value()->size();
GaussianWeightsKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _current_sigma, _window_size, _num_channels, (float*)_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
|
5e24b4b9f3bb8266074469832d9253555acf9236.hip | // !!! This is a file automatically generated by hipify!!!
// nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_fp16.h>
#include <mma.h>
#include <assert.h>
#include "common.h"
#define M 1024
#define N 1024
#define NUM_THREADS_PER_BLOCK 1024
#define vec 4
using namespace std;
__host__ void init_host_matrices(float *c){
for (int t = 0; t < M * N; t++) {
c[t] = (float) 0.0f;
}
}
__host__ void printMatrixFloat(float* matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", (float)matrix[i * n + j]);
}
printf("\n");
}
printf("\n");
}
__global__ void pwAdd(float *c, int m, int n){
float cst = 5;
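// Vectorised grid-stride loop: each thread loads four consecutive floats as one float4, adds cst to every lane, and stores the result back.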
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this block's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
cData.w = cData.w + cst;
cData.x = cData.x + cst;
cData.y = cData.y + cst;
cData.z = cData.z + cst;
*(cGmem) = cData;
//printf("%f\n",(float)cData.w);
}
}
int main() {
float *d_c, *h_c, *h_c_gpu_res;
int m, n;
m = M;
n = N;
h_c = (float*) malloc(m * n * sizeof(float));
h_c_gpu_res = (float*) malloc(m * n * sizeof(float));
check_cuda_error(hipMalloc(&d_c, m * n * sizeof(float)));
assert(((unsigned long long)d_c) % 128 == 0);
init_host_matrices(h_c);
check_cuda_error(hipMemcpy(d_c, h_c, m * n * sizeof(float), hipMemcpyHostToDevice));
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid(((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( pwAdd), dim3(grid), dim3(block), 0, 0, d_c, m , n);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
check_cuda_error(hipPeekAtLastError());
//cout<<"time: "<<msecTotal<<"ms \n";
#ifdef PRINT_HOST
check_cuda_error(hipDeviceSynchronize());
hipMemcpy(h_c_gpu_res, d_c, m * n * sizeof(float), hipMemcpyDeviceToHost);
check_cuda_error(hipDeviceSynchronize());
printMatrixFloat(h_c_gpu_res, m, n);
#endif
free(h_c);
free(h_c_gpu_res);
hipFree(d_c);
return 0;
}
| 5e24b4b9f3bb8266074469832d9253555acf9236.cu | // nvcc -O3 -std=c++11 -use_fast_math -ccbin g++ -arch=compute_75 -code=sm_75 -expt-relaxed-constexpr
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <cuda_fp16.h>
#include <mma.h>
#include <assert.h>
#include "common.h"
#define M 1024
#define N 1024
#define NUM_THREADS_PER_BLOCK 1024
#define vec 4
using namespace std;
__host__ void init_host_matrices(float *c){
for (int t = 0; t < M * N; t++) {
c[t] = (float) 0.0f;
}
}
__host__ void printMatrixFloat(float* matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
printf("%f ", (float)matrix[i * n + j]);
}
printf("\n");
}
printf("\n");
}
__global__ void pwAdd(float *c, int m, int n){
float cst = 5;
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) {
// Calculate this block's starting address.
float *base = c + (i * vec);
float4 *cGmem = (float4*)base;
float4 cData = *(cGmem);
cData.w = cData.w + cst;
cData.x = cData.x + cst;
cData.y = cData.y + cst;
cData.z = cData.z + cst;
*(cGmem) = cData;
//printf("%f\n",(float)cData.w);
}
}
int main() {
float *d_c, *h_c, *h_c_gpu_res;
int m, n;
m = M;
n = N;
h_c = (float*) malloc(m * n * sizeof(float));
h_c_gpu_res = (float*) malloc(m * n * sizeof(float));
check_cuda_error(cudaMalloc(&d_c, m * n * sizeof(float)));
assert(((unsigned long long)d_c) % 128 == 0);
init_host_matrices(h_c);
check_cuda_error(cudaMemcpy(d_c, h_c, m * n * sizeof(float), cudaMemcpyHostToDevice));
dim3 block(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 grid(((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
pwAdd<<<grid, block>>>(d_c, m , n);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
check_cuda_error(cudaPeekAtLastError());
//cout<<"time: "<<msecTotal<<"ms \n";
#ifdef PRINT_HOST
check_cuda_error(cudaDeviceSynchronize());
cudaMemcpy(h_c_gpu_res, d_c, m * n * sizeof(float), cudaMemcpyDeviceToHost);
check_cuda_error(cudaDeviceSynchronize());
printMatrixFloat(h_c_gpu_res, m, n);
#endif
free(h_c);
free(h_c_gpu_res);
cudaFree(d_c);
return 0;
}
|
5036420d96b827e532b044b79bf93eefd9cc5da8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* =====================================================================================
*
* Filename: jacobi_cpu.c
*
* Description:
*
* Version: 1.0
* Created: 12/05/11 02:30:51
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Company:
*
* =====================================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define SIZE 8192
#define BLOCK_SIZE 32
float ratio(float**u,float ant,int iter)
{
float tmp=0.0;
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
if(u[i][j]>tmp)
tmp=u[i][j];
}
}
if(iter%10==0)
printf(" iter=%d ratio=%f max=%f\n",iter,tmp/ant,tmp);
return tmp;
}
void muestra(float**u)
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
printf("%f ",u[i][j]);
}
printf("\n");
}
}
__global__ void jacobi(float *d_u_new,float *d_u, float *d_f,float h,float val)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i>0 && j>0 && i<SIZE-1 && j<SIZE-1)
d_u_new[i*SIZE+j]=0.25*(
h*h*d_f[i *SIZE+j ]+
d_u[(i-1)*SIZE+j ]+
d_u[(i+1)*SIZE+j ]+
d_u[i *SIZE+j-1 ]+
d_u[i *SIZE+j+1 ]);
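// Note: this assignment is not guarded by the if above, so every thread overwrites its cell with val, discarding the stencil result just computed.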
d_u_new[i*SIZE+j]=val;
__syncthreads();
}
int main()
{
float * h_u, * h_f;
float * d_u, *d_u_new, *d_f;
float * temp;
float suma=0.0;
int i,j;
size_t size;
float h = 1.0/SIZE;
/* Allocate memory */
size=SIZE*SIZE*sizeof(float);
printf("We need %d Mb\n",3*size/1024/1024);
h_u = (float*)malloc(size);
h_f = (float*)malloc(size);
/* Allocate GPU memory */
hipMalloc(&d_u,size);
hipMalloc(&d_u_new,size);
hipMalloc(&d_f,size);
/* Initialize */
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
h_f[i*SIZE+j]=0.0;
h_u[i*SIZE+j]=1.0*i;
}
}
for(i=0;i<SIZE;i++)
{
h_u[i]=0.0;
h_u[i*SIZE]=0.0;
h_u[SIZE*(SIZE-1)+i]=0.0;
h_u[i*SIZE+SIZE-1]=0.0;
}
/* Copy memory from the host to the GPU */
hipMemcpy(d_f,h_f,size,hipMemcpyHostToDevice);
hipMemcpy(d_u,h_u,size,hipMemcpyHostToDevice);
hipMemcpy(d_u_new,h_u,size,hipMemcpyHostToDevice);
/* Create the grid for the computation */
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
/* Main loop: call JACOBI */
for(i=0;i<1000;i++)
{
hipLaunchKernelGGL(( jacobi), dim3(dimGrid),dim3(dimBlock), 0, 0, d_u_new,d_u,d_f,h,1.0*i);
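/* Swap the pointers so the newly computed grid becomes the input of the next iteration */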
temp=d_u;
d_u=d_u_new;
d_u_new=temp;
if(i%100==0)
{
printf("iter=%d\n",i);
printf("Copiando de GPU a CPU\n");
hipMemcpy(h_u,d_u,size,hipMemcpyDeviceToHost);
printf(" %f %f %f\n", h_u[100], h_u[101],h_u[102]);
}
}
/* Copy memory from the GPU back to the host */
printf("Copying from GPU to CPU\n");
hipMemcpy(h_u,d_u,size,hipMemcpyDeviceToHost);
printf("Summing on the CPU\n");
/* Sum all the elements, a kind of checksum */
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
suma+=h_u[i*SIZE+j];
}
}
printf("%f %f %f %f\n", suma, h_u[100], h_u[101],h_u[102]);
/* Free memory */
free(h_u);
free(h_f);
hipFree(d_u_new);
hipFree(d_u);
hipFree(d_f);
}
| 5036420d96b827e532b044b79bf93eefd9cc5da8.cu | /*
* =====================================================================================
*
* Filename: jacobi_cpu.c
*
* Description:
*
* Version: 1.0
* Created: 12/05/11 02:30:51
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Company:
*
* =====================================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define SIZE 8192
#define BLOCK_SIZE 32
float ratio(float**u,float ant,int iter)
{
float tmp=0.0;
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
if(u[i][j]>tmp)
tmp=u[i][j];
}
}
if(iter%10==0)
printf(" iter=%d ratio=%f max=%f\n",iter,tmp/ant,tmp);
return tmp;
}
void muestra(float**u)
{
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
printf("%f ",u[i][j]);
}
printf("\n");
}
}
__global__ void jacobi(float *d_u_new,float *d_u, float *d_f,float h,float val)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i>0 && j>0 && i<SIZE-1 && j<SIZE-1)
d_u_new[i*SIZE+j]=0.25*(
h*h*d_f[i *SIZE+j ]+
d_u[(i-1)*SIZE+j ]+
d_u[(i+1)*SIZE+j ]+
d_u[i *SIZE+j-1 ]+
d_u[i *SIZE+j+1 ]);
d_u_new[i*SIZE+j]=val;
__syncthreads();
}
int main()
{
float * h_u, * h_f;
float * d_u, *d_u_new, *d_f;
float * temp;
float suma=0.0;
int i,j;
size_t size;
float h = 1.0/SIZE;
/* Allocate memory */
size=SIZE*SIZE*sizeof(float);
printf("We need %d Mb\n",3*size/1024/1024);
h_u = (float*)malloc(size);
h_f = (float*)malloc(size);
/* Allocate GPU memory */
cudaMalloc(&d_u,size);
cudaMalloc(&d_u_new,size);
cudaMalloc(&d_f,size);
/* Initialize */
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
h_f[i*SIZE+j]=0.0;
h_u[i*SIZE+j]=1.0*i;
}
}
for(i=0;i<SIZE;i++)
{
h_u[i]=0.0;
h_u[i*SIZE]=0.0;
h_u[SIZE*(SIZE-1)+i]=0.0;
h_u[i*SIZE+SIZE-1]=0.0;
}
/* Copy memory from the host to the GPU */
cudaMemcpy(d_f,h_f,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_u,h_u,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_u_new,h_u,size,cudaMemcpyHostToDevice);
/* Create the grid for the computation */
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
/* Main loop: call JACOBI */
for(i=0;i<1000;i++)
{
jacobi<<<dimGrid,dimBlock>>>(d_u_new,d_u,d_f,h,1.0*i);
temp=d_u;
d_u=d_u_new;
d_u_new=temp;
if(i%100==0)
{
printf("iter=%d\n",i);
printf("Copiando de GPU a CPU\n");
cudaMemcpy(h_u,d_u,size,cudaMemcpyDeviceToHost);
printf(" %f %f %f\n", h_u[100], h_u[101],h_u[102]);
}
}
/* Copy memory from the GPU back to the host */
printf("Copying from GPU to CPU\n");
cudaMemcpy(h_u,d_u,size,cudaMemcpyDeviceToHost);
printf("Summing on the CPU\n");
/* Sum all the elements, a kind of checksum */
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
suma+=h_u[i*SIZE+j];
}
}
printf("%f %f %f %f\n", suma, h_u[100], h_u[101],h_u[102]);
/* Free memory */
free(h_u);
free(h_f);
cudaFree(d_u_new);
cudaFree(d_u);
cudaFree(d_f);
}
|
89c1c533f1de183b5793eb09187d58b04fb2dac7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*/
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
}
| 89c1c533f1de183b5793eb09187d58b04fb2dac7.cu | /*
*/
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
}
|
70ded6a0b3527a729ae05352ecaa78fead2251f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <rocblas.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <reduction_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest();
////////////////////////////////////////////////////////////////////////////////
// export C interface
extern "C"
void computeGold( float* input, const unsigned int len, float* result);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest()
{
hipblasStatus_t status;
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
unsigned int num_elements = INPUT_SIZE;
unsigned int num_elements_B = 65536;
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int mem_size = sizeof( float) * (num_elements);
const unsigned int output_mem_size = sizeof( float) * (num_elements);
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
float* o_data = (float*) malloc(output_mem_size);
float* reference = (float*) malloc(output_mem_size);
float* b_data = (float*) malloc(num_elements_B*sizeof( float));
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = ((rand()/(float)RAND_MAX));
}
// printf("\n");
// compute reference solution
computeGold( h_data, num_elements, reference);
printf( "cpu: Test %f\n", reference[0]);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
float* b_idata;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, output_mem_size));
cutilSafeCall( hipMalloc( (void**) &b_idata, num_elements_B*sizeof( float)));
// copy host memory to device input array
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 1;
float results[1024];
int pid = 0;
{
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
float result = 0.0f;
for (int i=0; i<numIterations; i++) {
result += hipblasSasum(num_elements, d_idata, 1);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("cublas: Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer) / numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
float result = 0.0f;
for (int i=0; i<numIterations; i++) {
result += hipblasScasum(num_elements/2, (hipComplex*)d_idata, 1);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("cublas complex: Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer) / numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
float result = 0.0f;
int flip = 0;
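// Repeated reduction passes over num_elements/i values; `flip` alternates d_idata and d_odata as source and destination between passes.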
for (int i=1; i<num_elements; i*=2) {
dim3 grid(num_elements/i/2/256, 1, 1);
if (grid.x>1024) {
grid.y = grid.x/1024;
grid.x = 1024;
}
dim3 threads(256, 1, 1);
if (grid.x==0) {
grid.x = 1;
threads.x = num_elements/i/2;
}
hipLaunchKernelGGL(( reduction_naive), dim3(grid), dim3(threads), 0, 0, flip?d_idata:d_odata, flip?d_odata:d_idata, num_elements/i);
flip = 1-flip;
}
cutilSafeCall(hipMemcpy( o_data, flip?d_odata:d_idata, sizeof(float)*1,
hipMemcpyDeviceToHost));
result = o_data[0]*numIterations;
hipDeviceSynchronize();
cutStopTimer(timer);
printf("naive: Average time: %f ms, %f\n", cutGetTimerValue(timer) , result);
results[pid++] = cutGetTimerValue(timer);
cutResetTimer(timer);
}
{
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
float result = 0.0f;
numIterations = 1;
for (int i=0; i<numIterations; i++) {
dim3 grid(65536/512, 1, 1);
dim3 threads(512, 1, 1);
hipLaunchKernelGGL(( reduction_complex_opt_0), dim3(grid), dim3(threads), 0, 0, d_idata, b_idata, num_elements/2, 262144);
grid.x = 1;
// threads.x = 512;
hipLaunchKernelGGL(( reduction_complex_opt_1), dim3(grid), dim3(threads), 0, 0, d_idata, b_idata, num_elements/2, 262144);
cutilSafeCall(hipMemcpy( o_data, b_idata, sizeof(float)*1,
hipMemcpyDeviceToHost));
result += o_data[0];
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("reduction_complex_opt : Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer)/numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
float result = 0.0f;
numIterations = 1;
for (int i=0; i<numIterations; i++) {
dim3 grid(65536/512, 1, 1);
dim3 threads(512, 1, 1);
hipLaunchKernelGGL(( reduction_opt_0), dim3(grid), dim3(threads), 0, 0, d_idata, num_elements, 262144);
grid.x = 1;
// threads.x = 512;
hipLaunchKernelGGL(( reduction_opt_1), dim3(grid), dim3(threads), 0, 0, d_idata, num_elements, 262144);
cutilSafeCall(hipMemcpy( o_data, d_idata, sizeof(float)*1,
hipMemcpyDeviceToHost));
result += o_data[0];
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("reduction_opt : Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer)/numIterations;
cutResetTimer(timer);
}
// cleanup memory
free( h_data);
free( o_data);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
status = hipblasShutdown();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
hipDeviceReset();
}
| 70ded6a0b3527a729ae05352ecaa78fead2251f1.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cublas.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <reduction_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest();
////////////////////////////////////////////////////////////////////////////////
// export C interface
extern "C"
void computeGold( float* input, const unsigned int len, float* result);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest();
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest()
{
cublasStatus status;
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
unsigned int num_elements = INPUT_SIZE;
unsigned int num_elements_B = 65536;
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int mem_size = sizeof( float) * (num_elements);
const unsigned int output_mem_size = sizeof( float) * (num_elements);
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
float* o_data = (float*) malloc(output_mem_size);
float* reference = (float*) malloc(output_mem_size);
float* b_data = (float*) malloc(num_elements_B*sizeof( float));
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = ((rand()/(float)RAND_MAX));
}
// printf("\n");
// compute reference solution
computeGold( h_data, num_elements, reference);
printf( "cpu: Test %f\n", reference[0]);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
float* b_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, output_mem_size));
cutilSafeCall( cudaMalloc( (void**) &b_idata, num_elements_B*sizeof( float)));
// copy host memory to device input array
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 1;
float results[1024];
int pid = 0;
{
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
float result = 0.0f;
for (int i=0; i<numIterations; i++) {
result += cublasSasum(num_elements, d_idata, 1);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("cublas: Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer) / numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
float result = 0.0f;
for (int i=0; i<numIterations; i++) {
result += cublasScasum(num_elements/2, (cuComplex*)d_idata, 1);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("cublas complex: Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer) / numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
float result = 0.0f;
int flip = 0;
for (int i=1; i<num_elements; i*=2) {
dim3 grid(num_elements/i/2/256, 1, 1);
if (grid.x>1024) {
grid.y = grid.x/1024;
grid.x = 1024;
}
dim3 threads(256, 1, 1);
if (grid.x==0) {
grid.x = 1;
threads.x = num_elements/i/2;
}
reduction_naive<<< grid, threads>>>(flip?d_idata:d_odata, flip?d_odata:d_idata, num_elements/i);
flip = 1-flip;
}
cutilSafeCall(cudaMemcpy( o_data, flip?d_odata:d_idata, sizeof(float)*1,
cudaMemcpyDeviceToHost));
result = o_data[0]*numIterations;
cudaThreadSynchronize();
cutStopTimer(timer);
printf("naive: Average time: %f ms, %f\n", cutGetTimerValue(timer) , result);
results[pid++] = cutGetTimerValue(timer);
cutResetTimer(timer);
}
{
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
float result = 0.0f;
numIterations = 1;
for (int i=0; i<numIterations; i++) {
dim3 grid(65536/512, 1, 1);
dim3 threads(512, 1, 1);
reduction_complex_opt_0<<< grid, threads>>>(d_idata, b_idata, num_elements/2, 262144);
grid.x = 1;
// threads.x = 512;
reduction_complex_opt_1<<< grid, threads>>>(d_idata, b_idata, num_elements/2, 262144);
cutilSafeCall(cudaMemcpy( o_data, b_idata, sizeof(float)*1,
cudaMemcpyDeviceToHost));
result += o_data[0];
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("reduction_complex_opt : Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer)/numIterations;
cutResetTimer(timer);
}
{
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
float result = 0.0f;
numIterations = 1;
for (int i=0; i<numIterations; i++) {
dim3 grid(65536/512, 1, 1);
dim3 threads(512, 1, 1);
reduction_opt_0<<< grid, threads>>>(d_idata, num_elements, 262144);
grid.x = 1;
// threads.x = 512;
reduction_opt_1<<< grid, threads>>>(d_idata, num_elements, 262144);
cutilSafeCall(cudaMemcpy( o_data, d_idata, sizeof(float)*1,
cudaMemcpyDeviceToHost));
result += o_data[0];
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("reduction_opt : Average time: %f ms, %f\n", cutGetTimerValue(timer) / numIterations, result);
results[pid++] = cutGetTimerValue(timer)/numIterations;
cutResetTimer(timer);
}
// cleanup memory
free( h_data);
free( o_data);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
status = cublasShutdown();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
cudaThreadExit();
}
|
c77e2c702963b36bb4c57fa59a4a16a24632e45b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<string.h>
__global__ void conc(char *a,int len,char *b) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
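// Each of the n threads copies the entire input string into its own len-byte slot of b, starting at offset id*len.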
for(int i=0; i<len; i++)
b[id*len+i] = a[i];
}
int main() {
int n,len;
char a[100],b[500];
printf("Enter string:");
fgets(a, sizeof(a), stdin);
a[strcspn(a, "\n")] = '\0';
printf("Enter n: ");
scanf("%d",&n);
len = strlen(a);
char *d_a,*d_b;
memset(b,0,100);
hipMalloc((void**)&d_a,len);
hipMalloc((void**)&d_b,len*n);
hipMemcpy(d_a,&a,len,hipMemcpyHostToDevice);
hipMemcpy(d_b,&b,len*n,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conc), dim3(1),dim3(n), 0, 0, d_a,len,d_b);
hipMemcpy(&b,d_b,len*n,hipMemcpyDeviceToHost);
printf("%s\n",b);
hipFree(d_a);
hipFree(d_b);
}
| c77e2c702963b36bb4c57fa59a4a16a24632e45b.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<string.h>
__global__ void conc(char *a,int len,char *b) {
int id = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=0; i<len; i++)
b[id*len+i] = a[i];
}
int main() {
int n,len;
char a[100],b[500];
printf("Enter string:");
fgets(a, sizeof(a), stdin);
a[strcspn(a, "\n")] = '\0';
printf("Enter n: ");
scanf("%d",&n);
len = strlen(a);
char *d_a,*d_b;
memset(b,0,100);
cudaMalloc((void**)&d_a,len);
cudaMalloc((void**)&d_b,len*n);
cudaMemcpy(d_a,&a,len,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,&b,len*n,cudaMemcpyHostToDevice);
conc<<<1,n>>>(d_a,len,d_b);
cudaMemcpy(&b,d_b,len*n,cudaMemcpyDeviceToHost);
printf("%s\n",b);
cudaFree(d_a);
cudaFree(d_b);
}
|
0de7318072cdf4e34ba497b8d3b11a65134a77ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/div_rtn.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/im2col.cuh>
#include <ATen/native/im2col_shape_check.h>
namespace at {
namespace native {
namespace {
void col2im_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
TensorArg input_arg{input_, "input", 1};
TensorArg output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
col2im_shape_check(
input_,
Tensor(),
output_height,
output_width,
kernel_height,
kernel_width,
dilation_height,
dilation_width,
pad_height,
pad_width,
stride_height,
stride_width);
Tensor input = input_.contiguous();
bool batched_input = true;
if (input.dim() == 2) {
// Force batch
batched_input = false;
input.resize_({1, input.size(0), input.size(1)});
}
int64_t batch_size = input.size(0);
int64_t n_input_plane = input.size(1);
int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height);
output.resize_({batch_size, n_output_plane, output_height, output_width});
output.zero_();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
input.scalar_type(), "col2im_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
Tensor input_n;
Tensor output_n;
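// Number of sliding-window positions along each spatial dimension (standard convolution output-size formula).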
int64_t height_col = (output_height + 2 * pad_height -
(dilation_height * (kernel_height - 1) + 1)) /
stride_height +
1;
int64_t width_col = (output_width + 2 * pad_width -
(dilation_width * (kernel_width - 1) + 1)) /
stride_width +
1;
for (int64_t elt = 0; elt < batch_size; elt++) {
input_n = input.select(0, elt);
output_n = output.select(0, elt);
col2im<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
height_col,
width_col,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
}
if (!batched_input) {
output.resize_({n_output_plane, output_height, output_width});
}
});
}
void col2im_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
// im2col_out_cuda checks size of kernel_size, dilation, padding and stride
at::native::im2col_out_cuda(
grad_output, kernel_size, dilation, padding, stride, grad_input);
}
} // namespace
Tensor& col2im_out_cuda(const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& output) {
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor col2im_cuda(
const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor& col2im_backward_out_cuda(const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& grad_input) {
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
Tensor col2im_backward_cuda(
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
} // namespace native
} // namespace at
| 0de7318072cdf4e34ba497b8d3b11a65134a77ae.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/div_rtn.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/native/im2col_shape_check.h>
namespace at {
namespace native {
namespace {
void col2im_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
TensorArg input_arg{input_, "input", 1};
TensorArg output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
col2im_shape_check(
input_,
Tensor(),
output_height,
output_width,
kernel_height,
kernel_width,
dilation_height,
dilation_width,
pad_height,
pad_width,
stride_height,
stride_width);
Tensor input = input_.contiguous();
bool batched_input = true;
if (input.dim() == 2) {
// Force batch
batched_input = false;
input.resize_({1, input.size(0), input.size(1)});
}
int64_t batch_size = input.size(0);
int64_t n_input_plane = input.size(1);
int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height);
output.resize_({batch_size, n_output_plane, output_height, output_width});
output.zero_();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
input.scalar_type(), "col2im_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
Tensor input_n;
Tensor output_n;
int64_t height_col = (output_height + 2 * pad_height -
(dilation_height * (kernel_height - 1) + 1)) /
stride_height +
1;
int64_t width_col = (output_width + 2 * pad_width -
(dilation_width * (kernel_width - 1) + 1)) /
stride_width +
1;
for (int64_t elt = 0; elt < batch_size; elt++) {
input_n = input.select(0, elt);
output_n = output.select(0, elt);
col2im<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
input_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
height_col,
width_col,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
}
if (!batched_input) {
output.resize_({n_output_plane, output_height, output_width});
}
});
}
void col2im_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
// im2col_out_cuda checks size of kernel_size, dilation, padding and stride
at::native::im2col_out_cuda(
grad_output, kernel_size, dilation, padding, stride, grad_input);
}
} // namespace
Tensor& col2im_out_cuda(const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& output) {
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor col2im_cuda(
const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor& col2im_backward_out_cuda(const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& grad_input) {
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
Tensor col2im_backward_cuda(
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
} // namespace native
} // namespace at
|
3e668b50f7b3875afb3554f9154c556b24279943.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::mallocVectors(std::size_t vecSize)
{
if (vecInitialized_ && vecN_ != vecSize) {
clearVec();
}
if (!vecInitialized_) {
int status = 0;
status |= mallocSplitPtr(&d_x_, vecSize);
if (status) {
return;
}
status |= mallocSplitPtr(&d_y_, vecSize);
if (status) {
freeSplitPtr(d_x_);
return;
}
status |= mallocSplitPtr(&d_split_buffer_, vecSize);
if (status) {
freeSplitPtr(d_x_);
freeSplitPtr(d_y_);
freeSplitPtr(d_split_buffer_);
return;
}
status |= hipMalloc(&d_buffer_, sizeof(OriginalPointerType) * vecSize);
if (status) {
freeSplitPtr(d_x_);
freeSplitPtr(d_y_);
freeSplitPtr(d_split_buffer_);
return;
}
}
vecN_ = vecSize;
vecInitialized_ = true;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpVectors(std::size_t n)
{
if (vecInitialized_ && vecN_ < n) {
clearVec();
}
if (!vecInitialized_) {
mallocVectors(n);
setVectors(static_cast<OriginalPointerType>(1) / static_cast<OriginalPointerType>(n));
/*
std::vector<OriginalPointerType> rndNumbers(n);
for (size_t i = 0; i < n; ++i) {
rndNumbers[i] = rand() % maxRndNumber;
}
setUpVectors(rndNumbers);
*/
/*mallocSplitPtr(&d_x_, n);
mallocSplitPtr(&d_y_, n);
hipMalloc(&d_buffer_, sizeof(OriginalPointerType) * n);
vecN_ = n;
vecInitialized_ = true;*/
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpVectors(const std::vector<OriginalPointerType> &n)
{
if (vecInitialized_ && vecN_ < n.size()) {
clearVec();
}
if (!vecInitialized_) {
mallocVectors(n.size());
//setVector<<<gridSet, blockSet>>>(d_buffer_, static_cast<OriginalPointerType>(rand() % maxRndNumber));
if (!vecInitialized_) {
return;
}
hipMemcpy(d_buffer_, n.data(), sizeof(OriginalPointerType) * n.size(), hipMemcpyHostToDevice);
setUpHostSplitVec(n.size());
OriginalPointerType sum = 0;
for (std::size_t i = 0; i < n.size(); ++i) {
hostSplitVec_.writeAll(i, n[i]);
sum += n[i];
/*
if (i < 10) {
std::cout << "[" << i << "] vec number = " << n[i] << std::endl;
}*/
}
//std::cout << "Expected Norm: " << sum << '\n';
copy(d_x_, hostSplitVec_, n.size(), hipMemcpyHostToDevice);
copy(d_y_, hostSplitVec_, n.size(), hipMemcpyHostToDevice);
clearHostSplitVec();
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setVectors(OriginalPointerType initValues)
{
if (!vecInitialized_) {
return;
}
const dim3 blockSet(512);
const dim3 gridSet((vecN_ - 1) / blockSet.x + 1);
hipLaunchKernelGGL(( setVector), dim3(gridSet), dim3(blockSet), 0, 0, vecN_, d_x_, initValues);
hipLaunchKernelGGL(( setVector), dim3(gridSet), dim3(blockSet), 0, 0, vecN_, d_y_, initValues);
hipDeviceSynchronize();
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpMatrix(const CooMatrix<IndexType, OriginalPointerType> &mtx)
{
if (matInitialized_) {
clearMat();
}
if (!matInitialized_) {
EllMatrix<IndexType, OriginalPointerType> ellMtx(mtx);
setUpMatrix(ellMtx);
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpMatrix(const EllMatrix<IndexType, OriginalPointerType> &mtx)
{
if (matInitialized_) {
clearMat();
}
if (!matInitialized_ && mtx.isValid()) {
const std::vector<OriginalPointerType> &entries = mtx.getVals();
setUpHostSplitVec(entries.size());
for (std::size_t i = 0; i < entries.size(); ++i) {
hostSplitVec_.writeAll(i, entries[i]);
}
int status = 0;
status |= mallocSplitPtr(&d_vals_, entries.size());
if (status) {
std::cerr << "Error while allocating matrix values (size: " << entries.size() << ") with error: " << status << "!\n";
return;
}
copy(d_vals_, hostSplitVec_, entries.size(), hipMemcpyHostToDevice);
status |= hipMalloc(&d_indices_, mtx.getIndices().size() * sizeof(IndexType));
if (status) {
freeSplitPtr(d_vals_);
std::cerr << "Error while allocating indices (size: " << mtx.getIndices().size() << ") with error: " << status << "!\n";
return;
}
hipMemcpy(d_indices_, mtx.getIndices().data(), mtx.getIndices().size() * sizeof(IndexType), hipMemcpyHostToDevice);
status |= hipMalloc(&d_emptyColIndices_, mtx.getEmptyColIndices().size() * sizeof(IndexType));
if (status) {
freeSplitPtr(d_vals_);
hipFree(d_indices_);
std::cerr << "Error while allocating empty column indices (size: " << mtx.getEmptyColIndices().size() << ") with error: " << status << "!\n";
return;
}
hipMemcpy(d_emptyColIndices_, mtx.getEmptyColIndices().data(), mtx.getEmptyColIndices().size() * sizeof(IndexType), hipMemcpyHostToDevice);
emptyColNumber_ = mtx.getEmptyColIndices().size();
m_ = mtx.getM();
n_ = mtx.getN();
k_ = mtx.getK();
if (!vecInitialized_ || vecN_ != n_) {
setUpVectors(n_);
}
matInitialized_ = true;
clearHostSplitVec();
}
}
// returns ns for one execution, averaged over <loopCount_> runs
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runOwnNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
if (result != lastResult /*|| abs(result - lastResult) > 1e-19*/ ) {
std::cerr << "Error while calculating own norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
// returns ns for one execution, averaged over <loopCount_> runs
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runAltOwnNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_split_buffer_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_split_buffer_, d_buffer_);
if (result != lastResult /*|| abs(result - lastResult) > 1e-19*/ ) {
std::cerr << "Error while calculating own norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
//works only if calculated with normal doubles
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runCuBlasNorm()
{
if (!vecInitialized_ || NumberSegments != 1 || std::is_same<OriginalPointerType, double>::value == false) {
return 0;
}
hipblasStatus_t status;
hipblasHandle_t handle;
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cout << "INIT error with code " << status << std::endl;
/*
std::cout << "Error List:\n" << HIPBLAS_STATUS_NOT_INITIALIZED << '\n'
<< HIPBLAS_STATUS_ALLOC_FAILED << '\n'
<< HIPBLAS_STATUS_INVALID_VALUE << '\n'
<< HIPBLAS_STATUS_ARCH_MISMATCH << '\n'
<< HIPBLAS_STATUS_MAPPING_ERROR << '\n'
<< HIPBLAS_STATUS_EXECUTION_FAILED << '\n'
<< HIPBLAS_STATUS_INTERNAL_ERROR << '\n'
<< HIPBLAS_STATUS_NOT_SUPPORTED << '\n'
<< CUBLAS_STATUS_LICENSE_ERROR << '\n';
*/
return 0;
}
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
OriginalPointerType result{}, lastResult;
status = hipblasDnrm2(handle, vecN_, d_buffer_, 1, &result);
lastResult = result;
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cout << "Norm error with code " << status << std::endl;
return 0;
}
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
status = hipblasDnrm2(handle, vecN_, d_buffer_, 1, &result);
if (lastResult != result) {
std::cerr << "Big Error!\n";
}
lastResult = result;
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
normResult_ = result;
hipblasDestroy(handle);
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runSpmv()
{
if (!matInitialized_ || !vecInitialized_ || vecN_ < m_ || m_ != n_) {
std::cout << matInitialized_ << ' ' << vecInitialized_ << ' ' << vecN_ << ' ' << m_ << 'x' << n_ << std::endl;
return 0;
}
const dim3 blockPageRank(BlockSize);
const dim3 gridPageRank((m_ - 1) / blockPageRank.x + 1);
// run the Iterations at least once before measuring
const OriginalPointerType dampingFactor = static_cast<OriginalPointerType>(0.85);
const OriginalPointerType correction = static_cast<OriginalPointerType>(0);
std::uint64_t status = 0;
hipLaunchKernelGGL(( ellPageRank<ReadPartsNumber, ReadPartsNumber>), dim3(gridPageRank), dim3(blockPageRank), 0, 0,
m_,
k_,
d_indices_,
d_vals_,
dampingFactor,
correction,
d_x_,
d_y_);
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
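// Swap d_x_ and d_y_ each iteration so the vector produced by the previous launch is consumed by the next one.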
for (std::size_t i = 0; i < loopCount_; ++i) {
auto oldX = d_x_;
d_x_ = d_y_;
d_y_ = oldX;
hipLaunchKernelGGL(( ellPageRank<ReadPartsNumber, ReadPartsNumber>), dim3(gridPageRank), dim3(blockPageRank), 0, 0,
m_,
k_,
d_indices_,
d_vals_,
dampingFactor,
correction,
d_x_,
d_y_);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
status |= hipDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with spmv: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
setUpHostSplitVec(m_);
copy(hostSplitVec_, d_y_, m_, hipMemcpyDeviceToHost);
vectorResult_.resize(m_);
for (std::size_t i = 0; i < m_; ++i) {
vectorResult_[i] = hostSplitVec_.readAll(i);
}
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runRWTest()
{
if (!vecInitialized_) {
return 0;
}
const dim3 blockRW(BlockSize);
const dim3 gridRW((m_ - 1) / blockRW.x + 1);
//std::cout << "Running RW test with: " << gridRW.x << " x " << blockRW.x << ", Reading " << ReadPartsNumber << " Parts..." << std::endl;
hipLaunchKernelGGL(( rwTest<ReadPartsNumber>), dim3(gridRW), dim3(blockRW), 0, 0, vecN_, d_x_, 2.0);
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
hipLaunchKernelGGL(( rwTest<ReadPartsNumber>), dim3(gridRW), dim3(blockRW), 0, 0, vecN_, d_x_, 2.0);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
setUpHostSplitVec(m_);
copy(hostSplitVec_, d_y_, m_, hipMemcpyDeviceToHost);
vectorResult_.resize(m_);
for (std::size_t i = 0; i < m_; ++i) {
vectorResult_[i] = hostSplitVec_.readAll(i);
}
normResult_ = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::testConversion()
{
if (!vecInitialized_) {
return;
}
constexpr std::size_t doubleValuesIn2Blocks = 8 * 128 / sizeof(typename SplitType::SegmentType);
OriginalPointerType hostVals[doubleValuesIn2Blocks];
std::size_t testSize = ::min(doubleValuesIn2Blocks, static_cast<std::size_t>(vecN_));
setUpHostSplitVec(testSize);
auto vectorValue = [](std::size_t i) { return static_cast<OriginalPointerType>(i) + OriginalPointerType{1} / static_cast<OriginalPointerType>(i); };
for (std::size_t i = 0; i < testSize; ++i) {
hostSplitVec_.writeAll(i, vectorValue(i));
}
copy(d_split_buffer_, hostSplitVec_, m_, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( convertToOriginal), dim3(1), dim3(testSize), 0, 0, testSize, d_split_buffer_);
hipMemcpy(hostVals, d_split_buffer_.segmentPtr, sizeof(OriginalPointerType) * testSize, hipMemcpyDeviceToHost);
std::cout << std::scientific << std::setprecision(15);
for (std::size_t i = 0; i < testSize; ++i) {
if (hostVals[i] != vectorValue(i)) {
std::cout << std::setw(3) << i << ": Expected: " << vectorValue(i)
<< "\n " << "Actual: " << hostVals[i] << '\n';
std::cerr << "BIG ERROR!!! UNEQUAL!!!\n\n";
}
}
hipLaunchKernelGGL(( convertToSplitPointer), dim3(1),dim3(testSize), 0, 0, testSize, d_split_buffer_);
copy(hostSplitVec_, d_split_buffer_, testSize, hipMemcpyDeviceToHost);
for (std::size_t i = 0; i < testSize; ++i) {
OriginalPointerType curVal = hostSplitVec_.readAll(i);
if (curVal != vectorValue(i)) {
std::cout << std::setw(3) << i << ": Expected: " << vectorValue(i)
<< "\n " << "Actual: " << curVal << '\n';
std::cerr << "BIG ERROR!!! UNEQUAL!!!\n\n";
}
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runConversion()
{
if (!vecInitialized_ || !matInitialized_) {
return 0;
}
const dim3 blockVectorConversion(BlockSize);
const dim3 gridVectorConversion((vecN_ - 1) / blockVectorConversion.x + 1);
const dim3 blockMatrixConversion(BlockSize);
const dim3 gridMatrixConversion((k_ * m_ - 1) / blockMatrixConversion.x + 1);
// run the Iterations at least once before measuring
std::uint64_t status = 0;
for (int readPrec = 2; readPrec < NumberSegments; ++readPrec) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
hipLaunchKernelGGL(( convertToOriginal), dim3(gridVectorConversion), dim3(blockVectorConversion), 0, 0, vecN_, d_x_);
hipLaunchKernelGGL(( convertToOriginal), dim3(gridMatrixConversion), dim3(blockMatrixConversion), 0, 0, k_*m_, d_vals_);
status |= hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
for (int readPrec = 2; readPrec < NumberSegments; ++readPrec) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
hipLaunchKernelGGL(( convertToOriginal), dim3(gridVectorConversion), dim3(blockVectorConversion), 0, 0, vecN_, d_x_);
hipLaunchKernelGGL(( convertToOriginal), dim3(gridMatrixConversion), dim3(blockMatrixConversion), 0, 0, k_*m_, d_vals_);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
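// keep the total number of conversion passes (warm-up + timed loop) even so the buffers are left in a consistent representation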
if ((loopCount_ + 1) % 2 == 1) {
hipLaunchKernelGGL(( convertToOriginal), dim3(gridVectorConversion), dim3(blockVectorConversion), 0, 0, vecN_, d_x_);
hipLaunchKernelGGL(( convertToOriginal), dim3(gridMatrixConversion), dim3(blockMatrixConversion), 0, 0, k_*m_, d_vals_);
}
status |= hipDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with conversion: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runNormalization(int readPrec)
{
if (!vecInitialized_ || !matInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
std::uint64_t status = 0;
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
status |= hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
status |= hipDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with vector normalization: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runDiffNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1Diff<BlockSize, ReadPartsNumber>(d_x_, d_y_, vecN_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1Diff<BlockSize, ReadPartsNumber>(d_x_, d_y_, vecN_, d_buffer_);
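// the same reduction over unchanged data must give identical results every iteration; report any mismatch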
if (result != lastResult) {
std::cerr << "Error while calculating diff norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runSelectiveNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaSelectiveNorm1<BlockSize, ReadPartsNumber>(d_x_, emptyColNumber_, d_emptyColIndices_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaSelectiveNorm1<BlockSize, ReadPartsNumber>(d_x_, emptyColNumber_, d_emptyColIndices_, d_buffer_);
if (result != lastResult) {
std::cerr << "Error while calculating selective norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
| 3e668b50f7b3875afb3554f9154c556b24279943.cu | template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::mallocVectors(std::size_t vecSize)
{
if (vecInitialized_ && vecN_ != vecSize) {
clearVec();
}
if (!vecInitialized_) {
int status = 0;
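// allocate the two work vectors, the split scratch buffer and the reduction buffer; free everything allocated so far if any step fails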
status |= mallocSplitPtr(&d_x_, vecSize);
if (status) {
return;
}
status |= mallocSplitPtr(&d_y_, vecSize);
if (status) {
freeSplitPtr(d_x_);
return;
}
status |= mallocSplitPtr(&d_split_buffer_, vecSize);
if (status) {
freeSplitPtr(d_x_);
freeSplitPtr(d_y_);
freeSplitPtr(d_split_buffer_);
return;
}
status |= cudaMalloc(&d_buffer_, sizeof(OriginalPointerType) * vecSize);
if (status) {
freeSplitPtr(d_x_);
freeSplitPtr(d_y_);
freeSplitPtr(d_split_buffer_);
return;
}
}
vecN_ = vecSize;
vecInitialized_ = true;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpVectors(std::size_t n)
{
if (vecInitialized_ && vecN_ < n) {
clearVec();
}
if (!vecInitialized_) {
mallocVectors(n);
setVectors(static_cast<OriginalPointerType>(1) / static_cast<OriginalPointerType>(n));
/*
std::vector<OriginalPointerType> rndNumbers(n);
for (size_t i = 0; i < n; ++i) {
rndNumbers[i] = rand() % maxRndNumber;
}
setUpVectors(rndNumbers);
*/
/*mallocSplitPtr(&d_x_, n);
mallocSplitPtr(&d_y_, n);
cudaMalloc(&d_buffer_, sizeof(OriginalPointerType) * n);
vecN_ = n;
vecInitialized_ = true;*/
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpVectors(const std::vector<OriginalPointerType> &n)
{
if (vecInitialized_ && vecN_ < n.size()) {
clearVec();
}
if (!vecInitialized_) {
mallocVectors(n.size());
//setVector<<<gridSet, blockSet>>>(d_buffer_, static_cast<OriginalPointerType>(rand() % maxRndNumber));
if (!vecInitialized_) {
return;
}
cudaMemcpy(d_buffer_, n.data(), sizeof(OriginalPointerType) * n.size(), cudaMemcpyHostToDevice);
setUpHostSplitVec(n.size());
OriginalPointerType sum = 0;
for (std::size_t i = 0; i < n.size(); ++i) {
hostSplitVec_.writeAll(i, n[i]);
sum += n[i];
/*
if (i < 10) {
std::cout << "[" << i << "] vec number = " << n[i] << std::endl;
}*/
}
//std::cout << "Expected Norm: " << sum << '\n';
copy(d_x_, hostSplitVec_, n.size(), cudaMemcpyHostToDevice);
copy(d_y_, hostSplitVec_, n.size(), cudaMemcpyHostToDevice);
clearHostSplitVec();
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setVectors(OriginalPointerType initValues)
{
if (!vecInitialized_) {
return;
}
const dim3 blockSet(512);
const dim3 gridSet((vecN_ - 1) / blockSet.x + 1);
setVector<<<gridSet, blockSet>>>(vecN_, d_x_, initValues);
setVector<<<gridSet, blockSet>>>(vecN_, d_y_, initValues);
cudaDeviceSynchronize();
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpMatrix(const CooMatrix<IndexType, OriginalPointerType> &mtx)
{
if (matInitialized_) {
clearMat();
}
if (!matInitialized_) {
EllMatrix<IndexType, OriginalPointerType> ellMtx(mtx);
setUpMatrix(ellMtx);
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::setUpMatrix(const EllMatrix<IndexType, OriginalPointerType> &mtx)
{
if (matInitialized_) {
clearMat();
}
if (!matInitialized_ && mtx.isValid()) {
const std::vector<OriginalPointerType> &entries = mtx.getVals();
setUpHostSplitVec(entries.size());
for (std::size_t i = 0; i < entries.size(); ++i) {
hostSplitVec_.writeAll(i, entries[i]);
}
int status = 0;
status |= mallocSplitPtr(&d_vals_, entries.size());
if (status) {
std::cerr << "Error while allocating matrix values (size: " << entries.size() << ") with error: " << status << "!\n";
return;
}
copy(d_vals_, hostSplitVec_, entries.size(), cudaMemcpyHostToDevice);
status |= cudaMalloc(&d_indices_, mtx.getIndices().size() * sizeof(IndexType));
if (status) {
freeSplitPtr(d_vals_);
std::cerr << "Error while allocating indices (size: " << mtx.getIndices().size() << ") with error: " << status << "!\n";
return;
}
cudaMemcpy(d_indices_, mtx.getIndices().data(), mtx.getIndices().size() * sizeof(IndexType), cudaMemcpyHostToDevice);
status |= cudaMalloc(&d_emptyColIndices_, mtx.getEmptyColIndices().size() * sizeof(IndexType));
if (status) {
freeSplitPtr(d_vals_);
cudaFree(d_indices_);
std::cerr << "Error while allocating empty column indices (size: " << mtx.getEmptyColIndices().size() << ") with error: " << status << "!\n";
return;
}
cudaMemcpy(d_emptyColIndices_, mtx.getEmptyColIndices().data(), mtx.getEmptyColIndices().size() * sizeof(IndexType), cudaMemcpyHostToDevice);
emptyColNumber_ = mtx.getEmptyColIndices().size();
m_ = mtx.getM();
n_ = mtx.getN();
k_ = mtx.getK();
if (!vecInitialized_ || vecN_ != n_) {
setUpVectors(n_);
}
matInitialized_ = true;
clearHostSplitVec();
}
}
// returns ns for one execution, averaged over <loopCount_> runs
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runOwnNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
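// the norm of unchanged data must be reproducible across iterations; report any mismatch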
if (result != lastResult /*|| abs(result - lastResult) > 1e-19*/ ) {
std::cerr << "Error while calculating own norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
// returns ns for one execution, averaged over <loopCount_> runs
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runAltOwnNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_split_buffer_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_split_buffer_, d_buffer_);
if (result != lastResult /*|| abs(result - lastResult) > 1e-19*/ ) {
std::cerr << "Error while calculating own norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
//works only if calculated with normal doubles
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runCuBlasNorm()
{
if (!vecInitialized_ || NumberSegments != 1 || std::is_same<OriginalPointerType, double>::value == false) {
return 0;
}
cublasStatus_t status;
cublasHandle_t handle;
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cout << "INIT error with code " << status << std::endl;
/*
std::cout << "Error List:\n" << CUBLAS_STATUS_NOT_INITIALIZED << '\n'
<< CUBLAS_STATUS_ALLOC_FAILED << '\n'
<< CUBLAS_STATUS_INVALID_VALUE << '\n'
<< CUBLAS_STATUS_ARCH_MISMATCH << '\n'
<< CUBLAS_STATUS_MAPPING_ERROR << '\n'
<< CUBLAS_STATUS_EXECUTION_FAILED << '\n'
<< CUBLAS_STATUS_INTERNAL_ERROR << '\n'
<< CUBLAS_STATUS_NOT_SUPPORTED << '\n'
<< CUBLAS_STATUS_LICENSE_ERROR << '\n';
*/
return 0;
}
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
OriginalPointerType result{}, lastResult;
status = cublasDnrm2(handle, vecN_, d_buffer_, 1, &result);
lastResult = result;
if (status != CUBLAS_STATUS_SUCCESS) {
std::cout << "Norm error with code " << status << std::endl;
return 0;
}
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
status = cublasDnrm2(handle, vecN_, d_buffer_, 1, &result);
if (lastResult != result) {
std::cerr << "Big Error!\n";
}
lastResult = result;
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
normResult_ = result;
cublasDestroy(handle);
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runSpmv()
{
if (!matInitialized_ || !vecInitialized_ || vecN_ < m_ || m_ != n_) {
std::cout << matInitialized_ << ' ' << vecInitialized_ << ' ' << vecN_ << ' ' << m_ << 'x' << n_ << std::endl;
return 0;
}
const dim3 blockPageRank(BlockSize);
const dim3 gridPageRank((m_ - 1) / blockPageRank.x + 1);
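// launch enough blocks so that all m_ rows are covered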
// run the Iterations at least once before measuring
const OriginalPointerType dampingFactor = static_cast<OriginalPointerType>(0.85);
const OriginalPointerType correction = static_cast<OriginalPointerType>(0);
std::uint64_t status = 0;
ellPageRank<ReadPartsNumber, ReadPartsNumber><<<gridPageRank, blockPageRank>>>(
m_,
k_,
d_indices_,
d_vals_,
dampingFactor,
correction,
d_x_,
d_y_);
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
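// ping-pong the vectors: the previous iteration's output becomes this iteration's input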
auto oldX = d_x_;
d_x_ = d_y_;
d_y_ = oldX;
ellPageRank<ReadPartsNumber, ReadPartsNumber><<<gridPageRank, blockPageRank>>>(
m_,
k_,
d_indices_,
d_vals_,
dampingFactor,
correction,
d_x_,
d_y_);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
status |= cudaDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with spmv: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
setUpHostSplitVec(m_);
copy(hostSplitVec_, d_y_, m_, cudaMemcpyDeviceToHost);
vectorResult_.resize(m_);
for (std::size_t i = 0; i < m_; ++i) {
vectorResult_[i] = hostSplitVec_.readAll(i);
}
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runRWTest()
{
if (!vecInitialized_) {
return 0;
}
const dim3 blockRW(BlockSize);
const dim3 gridRW((m_ - 1) / blockRW.x + 1);
//std::cout << "Running RW test with: " << gridRW.x << " x " << blockRW.x << ", Reading " << ReadPartsNumber << " Parts..." << std::endl;
rwTest<ReadPartsNumber><<<gridRW, blockRW>>>(vecN_, d_x_, 2.0);
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
rwTest<ReadPartsNumber><<<gridRW, blockRW>>>(vecN_, d_x_, 2.0);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
setUpHostSplitVec(m_);
copy(hostSplitVec_, d_y_, m_, cudaMemcpyDeviceToHost);
vectorResult_.resize(m_);
for (std::size_t i = 0; i < m_; ++i) {
vectorResult_[i] = hostSplitVec_.readAll(i);
}
normResult_ = cudaNorm1<BlockSize, ReadPartsNumber>(d_x_, vecN_, d_buffer_);
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
void EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::testConversion()
{
if (!vecInitialized_) {
return;
}
constexpr std::size_t doubleValuesIn2Blocks = 8 * 128 / sizeof(typename SplitType::SegmentType);
OriginalPointerType hostVals[doubleValuesIn2Blocks];
std::size_t testSize = std::min(doubleValuesIn2Blocks, static_cast<std::size_t>(vecN_));
setUpHostSplitVec(testSize);
auto vectorValue = [](std::size_t i) { return static_cast<OriginalPointerType>(i) + OriginalPointerType{1} / static_cast<OriginalPointerType>(i); };
for (std::size_t i = 0; i < testSize; ++i) {
hostSplitVec_.writeAll(i, vectorValue(i));
}
copy(d_split_buffer_, hostSplitVec_, testSize, cudaMemcpyHostToDevice);
convertToOriginal<<<1, testSize>>>(testSize, d_split_buffer_);
cudaMemcpy(hostVals, d_split_buffer_.segmentPtr, sizeof(OriginalPointerType) * testSize, cudaMemcpyDeviceToHost);
std::cout << std::scientific << std::setprecision(15);
for (std::size_t i = 0; i < testSize; ++i) {
if (hostVals[i] != vectorValue(i)) {
std::cout << std::setw(3) << i << ": Expected: " << vectorValue(i)
<< "\n " << "Actual: " << hostVals[i] << '\n';
std::cerr << "BIG ERROR!!! UNEQUAL!!!\n\n";
}
}
convertToSplitPointer<<<1,testSize>>>(testSize, d_split_buffer_);
copy(hostSplitVec_, d_split_buffer_, testSize, cudaMemcpyDeviceToHost);
for (std::size_t i = 0; i < testSize; ++i) {
OriginalPointerType curVal = hostSplitVec_.readAll(i);
if (curVal != vectorValue(i)) {
std::cout << std::setw(3) << i << ": Expected: " << vectorValue(i)
<< "\n " << "Actual: " << curVal << '\n';
std::cerr << "BIG ERROR!!! UNEQUAL!!!\n\n";
}
}
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runConversion()
{
if (!vecInitialized_ || !matInitialized_) {
return 0;
}
const dim3 blockVectorConversion(BlockSize);
const dim3 gridVectorConversion((vecN_ - 1) / blockVectorConversion.x + 1);
const dim3 blockMatrixConversion(BlockSize);
const dim3 gridMatrixConversion((k_ * m_ - 1) / blockMatrixConversion.x + 1);
// run the Iterations at least once before measuring
std::uint64_t status = 0;
for (int readPrec = 2; readPrec < NumberSegments; ++readPrec) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
convertToOriginal<<<gridVectorConversion, blockVectorConversion>>>(vecN_, d_x_);
convertToOriginal<<<gridMatrixConversion, blockMatrixConversion>>>(k_*m_, d_vals_);
status |= cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
for (int readPrec = 2; readPrec < NumberSegments; ++readPrec) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
convertToOriginal<<<gridVectorConversion, blockVectorConversion>>>(vecN_, d_x_);
convertToOriginal<<<gridMatrixConversion, blockMatrixConversion>>>(k_*m_, d_vals_);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
if ((loopCount_ + 1) % 2 == 1) {
convertToOriginal<<<gridVectorConversion, blockVectorConversion>>>(vecN_, d_x_);
convertToOriginal<<<gridMatrixConversion, blockMatrixConversion>>>(k_*m_, d_vals_);
}
status |= cudaDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with conversion: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runNormalization(int readPrec)
{
if (!vecInitialized_ || !matInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
std::uint64_t status = 0;
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
status |= cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
normalizeVectorSelector<BlockSize>(readPrec, d_x_, vecN_, d_buffer_);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
status |= cudaDeviceSynchronize();
if (status != 0) {
std::cerr << "Error with vector normalization: " << status << std::endl;
}
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runDiffNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaNorm1Diff<BlockSize, ReadPartsNumber>(d_x_, d_y_, vecN_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaNorm1Diff<BlockSize, ReadPartsNumber>(d_x_, d_y_, vecN_, d_buffer_);
if (result != lastResult) {
std::cerr << "Error while calculating diff norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
template<SPLIT_POINTER_TEMPLATE_HEAD, typename IndexType>
template<int ReadPartsNumber, unsigned int BlockSize>
std::uint64_t EllBenchmark<SPLIT_POINTER_TEMPLATES, IndexType>::runSelectiveNorm()
{
if (!vecInitialized_) {
return 0;
}
// run the Iterations at least once before measuring
OriginalPointerType result{}, lastResult{};
lastResult = result = cudaSelectiveNorm1<BlockSize, ReadPartsNumber>(d_x_, emptyColNumber_, d_emptyColIndices_, d_buffer_);
//std::cout << "calculating Norm of size: "<< vecN_ << std::endl;
cudaDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (std::size_t i = 0; i < loopCount_; ++i) {
result = cudaSelectiveNorm1<BlockSize, ReadPartsNumber>(d_x_, emptyColNumber_, d_emptyColIndices_, d_buffer_);
if (result != lastResult) {
std::cerr << "Error while calculating selective norm! " << lastResult << " vs. " << result << "\n";
}
lastResult = result;
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
normResult_ = result;
std::uint64_t totalRuntime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
return totalRuntime / loopCount_;
}
|
ff5efb2f692b62301e01184a1672727de32d7609.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "Wiggle.h"
#include <deque>
#include "WFObjUtils.h"
#include "SVD.h"
#include "DebugUtils.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
class LocalCoordsEstimator
{
static const bool USE_PCA = false;
public:
uint2* vertexRanges;
float3* vertexBuffer;
double* tmpCovMatrix;
double* tmpDiagonalW;
double* tmpMatrixV;
double* tmpVecRV;
float3* outTranslation;
quaternion4f* outRotation;
LocalCoordsEstimator(
uint2* aRanges,
float3* aBuffer,
double* aCovMatrix,
double* aDiagonalW,
double* aMatrixV,
double* aVecRV,
float3* aOutTranslation,
quaternion4f* aOutRot
) :
vertexRanges(aRanges),
vertexBuffer(aBuffer),
tmpCovMatrix(aCovMatrix),
tmpDiagonalW(aDiagonalW),
tmpMatrixV(aMatrixV),
tmpVecRV(aVecRV),
outTranslation(aOutTranslation),
outRotation(aOutRot)
{}
__host__ __device__ void operator()(const size_t& aId)
{
const unsigned int objId = (unsigned)aId;
//Compute the mean of the vertex locations
float3 center = make_float3(0.f, 0.f, 0.f);
uint2 vtxRange = vertexRanges[objId];
unsigned int vtxCount = vtxRange.y - vtxRange.x;
float numPoints = (float)vtxCount;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
center += vertexBuffer[vtxRange.x + vtxId];
}
center /= numPoints;
outTranslation[aId] = center;
//Find the vertex furthest away from the center
float3 vtx0 = center;
float dist0 = 0.f;
float count = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = vec - center;
const float distSQR = dot(delta, delta);
if (distSQR > dist0 && distSQR - dist0 > 0.001f * dist0)
{
vtx0 = vec;
dist0 = distSQR;
count = 1.f;
}
else if (fabsf(dist0 - distSQR) < 0.001f * dist0)
{
vtx0 += vec;
count += 1.f;
}
}
if(count > 1.f)
vtx0 /= count;
count = 0.f;
//Find the other end of the diameter
float3 vtx1 = vtx0;
float diameter = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = vec - vtx0;
const float distSQR = dot(delta, delta);
if (distSQR > diameter && distSQR - diameter > 0.001f * diameter)
{
vtx1 = vec;
diameter = distSQR;
count = 1.f;
}
else if (fabsf(diameter - distSQR) < 0.001f * diameter)
{
vtx1 += vec;
count += 1.f;
}
}
if(count > 1.f)
vtx1 /= count;
const float3 dir0 = ~(vtx1 - vtx0);
//Find the vertex furthest away from the diameter
float3 vtx2 = vtx0;
float dist2 = 0.f;
count = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = cross(dir0, vec - vtx0);
const float distSQR = dot(delta, delta);
const float distCenterSQR = dot(vec - center, vec - center);
if (distSQR >= dist2 && distSQR - dist2 > 0.01f * dist2)
{
vtx2 = vec;
dist2 = distSQR;
count = 1.f;
}
else if (fabsf(dist2 - distSQR) < 0.01f * dist2)
{
vtx2 += vec;
count += 1.f;
}
}
if (count > 1.f)
vtx2 /= count;
//vtx0 = vertexBuffer[vtxRange.x + 0];
//vtx1 = vertexBuffer[vtxRange.x + 1];
//vtx2 = vertexBuffer[vtxRange.x + 2];
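//Gram-Schmidt: dir1 = normalized component of (vtx2 - vtx0) orthogonal to dir0; dir2 completes a right-handed orthonormal frame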
const float3 dir1 = ~((vtx2 - vtx0) - dir0 * dot(vtx2 - vtx0, dir0));
const float3 dir2 = ~cross(dir0, dir1);
float rotDet = determinant(
dir0.x, dir1.x, dir2.x,
dir0.y, dir1.y, dir2.y,
dir0.z, dir1.z, dir2.z
);
outRotation[aId] = quaternion4f(
dir0.x, dir1.x, dir2.x,
dir0.y, dir1.y, dir2.y,
dir0.z, dir1.z, dir2.z
);
if (USE_PCA)
{
//TODO: covMat currently is transpose(X)*X. We need SVD(X) instead
//Compute covariance matrix
double* covMat = tmpCovMatrix + aId * 3;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
float3 vec1 = vertexBuffer[vtxRange.x + vtxId] - center;
covMat[0 * 3 + 0] += (double)vec1.x * vec1.x;
covMat[1 * 3 + 0] += (double)vec1.y * vec1.x;
covMat[2 * 3 + 0] += (double)vec1.z * vec1.x;
covMat[0 * 3 + 1] += (double)vec1.x * vec1.y;
covMat[1 * 3 + 1] += (double)vec1.y * vec1.y;
covMat[2 * 3 + 1] += (double)vec1.z * vec1.y;
covMat[0 * 3 + 2] += (double)vec1.x * vec1.z;
covMat[1 * 3 + 2] += (double)vec1.y * vec1.z;
covMat[2 * 3 + 2] += (double)vec1.z * vec1.z;
}
//Singular Value Decomposition
double* diag = tmpDiagonalW + aId * 3;
double* vMat = tmpMatrixV + aId * 3 * 3;
double* tmp = tmpVecRV + aId * 3;
svd::svdcmp(covMat, 3, 3, diag, vMat, tmp);
const float3 col0 = make_float3((float)vMat[0], (float)vMat[1], (float)vMat[2]);
const float3 col1 = make_float3((float)vMat[3], (float)vMat[4], (float)vMat[5]);
const float3 col2 = make_float3((float)vMat[6], (float)vMat[7], (float)vMat[8]);
float rotDet = determinant(
col0.x, col1.x, col2.x,
col0.y, col1.y, col2.y,
col0.z, col1.z, col2.z
);
if (rotDet < 0.f)
{
vMat[0] = -vMat[0];
vMat[1] = -vMat[1];
vMat[2] = -vMat[2];
rotDet = -rotDet;
}
if (fabsf(rotDet - 1.0f) <= 0.01f)
{
quaternion4f rotation(
col0.x, col1.x, col2.x,
col0.y, col1.y, col2.y,
col0.z, col1.z, col2.z
);
outRotation[aId] = ~rotation;
}
}
}
};
class TransformationExtractor
{
public:
thrust::device_ptr<unsigned int> nodeTypes;
thrust::device_ptr<unsigned int> outNeighborTypeKeys;
thrust::device_ptr<unsigned int> outNeighborTypeVals;
thrust::device_ptr<float3> translation;
thrust::device_ptr<quaternion4f> rotation;
thrust::device_ptr<float3> outTranslation;
thrust::device_ptr<quaternion4f> outRotation;
thrust::device_ptr<quaternion4f> outRotationAbs;
TransformationExtractor(
thrust::device_ptr<unsigned int> aNodeTypes,
thrust::device_ptr<unsigned int> aOutNbrTypeKeys,
thrust::device_ptr<unsigned int> aOutNbrTypeVals,
thrust::device_ptr<float3> aTranslation,
thrust::device_ptr<quaternion4f> aRotation,
thrust::device_ptr<float3> aOutTranslation,
thrust::device_ptr<quaternion4f> aOutRotation,
thrust::device_ptr<quaternion4f> aOutRotationAbs
) :
nodeTypes(aNodeTypes),
outNeighborTypeKeys(aOutNbrTypeKeys),
outNeighborTypeVals(aOutNbrTypeVals),
translation(aTranslation),
rotation(aRotation),
outTranslation(aOutTranslation),
outRotation(aOutRotation),
outRotationAbs(aOutRotationAbs)
{}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
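//For the directed edge (nodeId1 -> nodeId2) record the node-type pair, node 2's translation/rotation relative to node 1's local frame, and node 1's inverse absolute rotation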
const unsigned int nodeId1 = thrust::get<0>(t);
const unsigned int nodeId2 = thrust::get<1>(t);
const unsigned int outId = (unsigned)thrust::get<2>(t);
outNeighborTypeKeys[outId] = nodeTypes[nodeId1];
outNeighborTypeVals[outId] = nodeTypes[nodeId2];
quaternion4f rot1 = rotation[nodeId1];
outTranslation[outId] = transformVec(rot1.conjugate(), translation[nodeId2] - translation[nodeId1]);
quaternion4f rot2 = rotation[nodeId2];
outRotation[outId] = rot2.conjugate() * rot1;
outRotationAbs[outId] = rot1.conjugate();
}
};
__host__ void Wiggle::init(WFObject & aObj, Graph & aGraph)
{
seed = (unsigned int)std::chrono::system_clock::now().time_since_epoch().count();
float3 minBound, maxBound;
ObjectBoundsExporter()(aObj, minBound, maxBound);
spatialTolerance = std::max(0.01f * len(maxBound - minBound), spatialTolerance);
//Unpack and upload the vertex buffer
thrust::host_vector<uint2> vertexRangesHost;
thrust::host_vector<float3> vertexBufferHost;
VertexBufferUnpacker unpackVertices;
unpackVertices(aObj, vertexRangesHost, vertexBufferHost);
thrust::device_vector<uint2> vertexRangesDevice(vertexRangesHost);
thrust::device_vector<float3> vertexBufferDevice(vertexBufferHost);
//#ifdef _DEBUG
// outputDeviceVector("vertex ranges: ", vertexRangesDevice);
// outputDeviceVector("vertex buffer: ", vertexBufferDevice);
//#endif
//Compute a local coordinate system for each object (PCA optional via USE_PCA)
thrust::device_vector<float3> outTranslation(aObj.getNumObjects());
thrust::device_vector<quaternion4f> outRotation(aObj.getNumObjects());
thrust::device_vector<double> tmpCovMatrix(aObj.getNumObjects() * 3 * 3, 0.f);
thrust::device_vector<double> tmpDiagonalW(aObj.getNumObjects() * 3);
thrust::device_vector<double> tmpMatrixV(aObj.getNumObjects() * 3 * 3);
thrust::device_vector<double> tmpVecRV(aObj.getNumObjects() * 3);
LocalCoordsEstimator estimateT(
thrust::raw_pointer_cast(vertexRangesDevice.data()),
thrust::raw_pointer_cast(vertexBufferDevice.data()),
thrust::raw_pointer_cast(tmpCovMatrix.data()),
thrust::raw_pointer_cast(tmpDiagonalW.data()),
thrust::raw_pointer_cast(tmpMatrixV.data()),
thrust::raw_pointer_cast(tmpVecRV.data()),
thrust::raw_pointer_cast(outTranslation.data()),
thrust::raw_pointer_cast(outRotation.data())
);
thrust::counting_iterator<size_t> first(0u);
thrust::counting_iterator<size_t> last(aObj.getNumObjects());
thrust::for_each(first, last, estimateT);
//#ifdef _DEBUG
// outputDeviceVector("translations: ", outTranslation);
// outputDeviceVector("rotations: ", outRotation);
//#endif
//Extract and upload node type information
thrust::host_vector<unsigned int> nodeTypesHost(aGraph.numNodes(), (unsigned int)aObj.materials.size());
for (size_t nodeId = 0; nodeId < aObj.objects.size(); ++nodeId)
{
size_t faceId = aObj.objects[nodeId].x;
size_t materialId = aObj.faces[faceId].material;
nodeTypesHost[nodeId] = (unsigned int)materialId;
}
thrust::device_vector<unsigned int> nodeTypes(nodeTypesHost);
thrust::device_vector<unsigned int> neighborTypeKeys(aGraph.numEdges() * 2u);
thrust::device_vector<unsigned int> neighborTypeVals(aGraph.numEdges() * 2u);
thrust::device_vector<float3> relativeTranslation(aGraph.numEdges() * 2u);
thrust::device_vector<quaternion4f> relativeRotation(aGraph.numEdges() * 2u);
thrust::device_vector<quaternion4f> absoluteRotation(aGraph.numEdges() * 2u);
TransformationExtractor extractRelativeT(
nodeTypes.data(),
neighborTypeKeys.data(),
neighborTypeVals.data(),
outTranslation.data(),
outRotation.data(),
relativeTranslation.data(),
relativeRotation.data(),
absoluteRotation.data()
);
thrust::counting_iterator<size_t> lastEdge(aGraph.numEdges() * 2u);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(aGraph.adjacencyKeys.begin(), aGraph.adjacencyVals.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(aGraph.adjacencyKeys.end(), aGraph.adjacencyVals.end(), lastEdge)),
extractRelativeT);
if(mNeighborTypeKeys.size() == 0u)
{
//first call of init
mNeighborTypeKeys = thrust::host_vector<unsigned int>(neighborTypeKeys);
mNeighborTypeVals = thrust::host_vector<unsigned int>(neighborTypeVals);
mRelativeTranslation = thrust::host_vector<float3>(relativeTranslation);
mRelativeRotation = thrust::host_vector<quaternion4f>(relativeRotation);
mAbsoluteRotation = thrust::host_vector<quaternion4f>(absoluteRotation);
}
else
{
//init already called, append new data
size_t oldCount = mNeighborTypeKeys.size();
mNeighborTypeKeys.resize(oldCount + neighborTypeKeys.size());
mNeighborTypeVals.resize(oldCount + neighborTypeVals.size());
mRelativeTranslation.resize(oldCount + relativeTranslation.size());
mRelativeRotation.resize(oldCount + relativeRotation.size());
mAbsoluteRotation.resize(oldCount + absoluteRotation.size());
thrust::copy(neighborTypeKeys.begin(), neighborTypeKeys.end(), mNeighborTypeKeys.begin() + oldCount);
thrust::copy(neighborTypeVals.begin(), neighborTypeVals.end(), mNeighborTypeVals.begin() + oldCount);
thrust::copy(relativeTranslation.begin(), relativeTranslation.end(), mRelativeTranslation.begin() + oldCount);
thrust::copy(relativeRotation.begin(), relativeRotation.end(), mRelativeRotation.begin() + oldCount);
thrust::copy(absoluteRotation.begin(), absoluteRotation.end(), mAbsoluteRotation.begin() + oldCount);
}
//sort by node type
thrust::sort_by_key(
mNeighborTypeKeys.begin(),
mNeighborTypeKeys.end(),
thrust::make_zip_iterator(thrust::make_tuple(mNeighborTypeVals.begin(), mRelativeTranslation.begin(), mRelativeRotation.begin(), mAbsoluteRotation.begin()))
);
//setup search intervals for each node type
mIntervals.resize(aObj.materials.size() + 1u, 0u);
for (size_t i = 0u; i < mNeighborTypeKeys.size() - 1u; ++i)
{
if (mNeighborTypeKeys[i] < mNeighborTypeKeys[i + 1u])
{
mIntervals[mNeighborTypeKeys[i] + 1] = (unsigned)i + 1u;
}
}
//last element
if (mNeighborTypeKeys.size() > 0u)
mIntervals[mNeighborTypeKeys[mNeighborTypeKeys.size() - 1u] + 1] = (unsigned)mNeighborTypeKeys.size();
//fill gaps due to missing node types
for (size_t i = 1u; i < mIntervals.size(); ++i)
{
mIntervals[i] = std::max(mIntervals[i - 1u], mIntervals[i]);
}
#ifdef _DEBUG
outputHostVector("translations: ", mRelativeTranslation);
outputHostVector("rotations: ", mRelativeRotation);
#endif
}
__host__ void Wiggle::fixRelativeTransformations(WFObject & aObj, Graph & aGraph)
{
numCorrections = 0u;
size_t numNodes = aObj.objects.size();
thrust::host_vector<unsigned int> visited(numNodes, 0u);
thrust::host_vector<unsigned int> intervalsHost(aGraph.intervals);
thrust::host_vector<unsigned int> adjacencyValsHost(aGraph.adjacencyVals);
//Extract and upload node type information
thrust::host_vector<unsigned int> nodeTypesHost(aGraph.numNodes(), (unsigned int)aObj.materials.size());
for (size_t nodeId = 0; nodeId < aObj.objects.size(); ++nodeId)
{
size_t faceId = aObj.objects[nodeId].x;
size_t materialId = aObj.faces[faceId].material;
nodeTypesHost[nodeId] = (unsigned int)materialId;
}
if (seedNodeId >= (unsigned int)numNodes)
{
std::default_random_engine generator(seed);
std::uniform_int_distribution<unsigned int> distribution(0u, (unsigned int)numNodes - 1u);
seedNodeId = distribution(generator);
}
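//Breadth-first traversal from the seed node; each node's neighbors are corrected relative to it before being enqueued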
std::deque<unsigned int> frontier;
frontier.push_back(seedNodeId);
visited[seedNodeId] = 1u;
while (!frontier.empty())
{
const unsigned int nodeId = frontier.front();
frontier.pop_front();
processNeighbors(
aObj,
nodeId,
visited,
intervalsHost,
adjacencyValsHost,
nodeTypesHost);
for (unsigned int nbrId = intervalsHost[nodeId]; nbrId < intervalsHost[nodeId + 1]; ++nbrId)
{
const unsigned int nodeId = adjacencyValsHost[nbrId];
if (visited[nodeId] == 0u)
{
frontier.push_back(nodeId);
visited[nodeId] = 1u;
}
}
}
}
__host__ void Wiggle::processNeighbors(
WFObject& aObj,
unsigned int aObjId,
thrust::host_vector<unsigned int>& visited,
thrust::host_vector<unsigned int>& intervalsHost,
thrust::host_vector<unsigned int>& adjacencyValsHost,
thrust::host_vector<unsigned int>& nodeTypeIds)
{
const unsigned int nbrCount = intervalsHost[aObjId + 1u] - intervalsHost[aObjId];
if (nbrCount == 0)
return;
const unsigned int nodeCount = nbrCount + 1u;
thrust::host_vector<unsigned int> nodeIds(nodeCount, aObjId);
thrust::copy(adjacencyValsHost.begin() + intervalsHost[aObjId], adjacencyValsHost.begin() + intervalsHost[aObjId + 1], nodeIds.begin() + 1u);
thrust::host_vector<float3> vertexBufferHost;
thrust::host_vector<uint2> vtxRanges;
VertexBufferUnpacker unpackVertices;
unpackVertices(aObj, nodeIds, vtxRanges, vertexBufferHost);
//Compute a local coordinate system for each object (PCA optional via USE_PCA)
thrust::host_vector<float3> translations(nodeCount);
thrust::host_vector<quaternion4f> rotations(nodeCount);
thrust::host_vector<double> tmpCovMatrix(nodeCount * 3 * 3, 0.f);
thrust::host_vector<double> tmpDiagonalW(nodeCount * 3);
thrust::host_vector<double> tmpMatrixV(nodeCount * 3 * 3);
thrust::host_vector<double> tmpVecRV(nodeCount * 3);
LocalCoordsEstimator estimateT(
thrust::raw_pointer_cast(vtxRanges.data()),
thrust::raw_pointer_cast(vertexBufferHost.data()),
thrust::raw_pointer_cast(tmpCovMatrix.data()),
thrust::raw_pointer_cast(tmpDiagonalW.data()),
thrust::raw_pointer_cast(tmpMatrixV.data()),
thrust::raw_pointer_cast(tmpVecRV.data()),
thrust::raw_pointer_cast(translations.data()),
thrust::raw_pointer_cast(rotations.data())
);
//thrust::counting_iterator<size_t> first(0u);
//thrust::counting_iterator<size_t> last(nodeCount);
//thrust::for_each(first, last, estimateT);
for (unsigned int i = 0u; i < nodeCount; ++i)
{
estimateT(i);
}
if (debugOutputLocalFrames)
{
transformObj(aObj, nodeIds[0], translations[0], make_float3(0.f, 0.f, 0.f), rotations[0].conjugate());
return;
}
const unsigned int nodeId1 = nodeIds[0];
for (unsigned int i = 1; i < nodeIds.size(); i++)
{
const unsigned int nodeId2 = nodeIds[i];
if (visited[nodeId2])
continue;
const unsigned int typeId1 = nodeTypeIds[nodeId1];
const unsigned int typeId2 = nodeTypeIds[nodeId2];
quaternion4f rot = rotations[0];
float3 relativeT = transformVec(rot.conjugate(), translations[i] - translations[0]);
quaternion4f relativeR = rotations[i].conjugate() * rot;
float3 bestT = relativeT;
quaternion4f bestR = relativeR;
quaternion4f bestA = relativeR;
findBestMatch(typeId1, typeId2, relativeT, relativeR, bestT, bestR, bestA);
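//Skip the correction if the observed relative rotation already matches the best stored one within angleTolerance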
const float angleDelta = fabsf(fabsf((bestR * relativeR.conjugate()).w) - 1.f);
if (angleDelta < angleTolerance)
continue;
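//Apply only a quarter of the translation correction per pass to avoid over-correcting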
float3 translateDelta = (0.25f) * transformVec(rot, bestT - relativeT);
transformObj(aObj, nodeId2, translations[i], translateDelta, rotations[i] * bestR * rot.conjugate());
++numCorrections;
}
}
__host__ void Wiggle::findBestMatch(
unsigned int aTypeId1,
unsigned int aTypeId2,
const float3& aTranslation,
const quaternion4f& aRotation,
float3& oTranslation,
quaternion4f& oRotation,
quaternion4f& oAbsRotation)
{
float bestSpatialDist = FLT_MAX;
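//First pass: smallest translation distance to any stored example of this type pair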
for (unsigned int id = mIntervals[aTypeId1]; id < mIntervals[aTypeId1 + 1]; id++)
{
if (mNeighborTypeVals[id] != aTypeId2)
continue;
const float3 delta = mRelativeTranslation[id] - aTranslation;
const float currentSpatialDist = len(delta);
if (currentSpatialDist < bestSpatialDist)
{
bestSpatialDist = currentSpatialDist;
}
}
float bestAngleDist = FLT_MAX;
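//Second pass: among examples within spatialTolerance of that distance, pick the one with the closest rotation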
for (unsigned int id = mIntervals[aTypeId1]; id < mIntervals[aTypeId1 + 1]; id++)
{
if (mNeighborTypeVals[id] != aTypeId2)
continue;
const float3 delta = mRelativeTranslation[id] - aTranslation;
const float currentSpatialDist = len(delta);
const float angleDelta = fabsf(fabsf((aRotation * mRelativeRotation[id].conjugate()).w) - 1.f);
if (currentSpatialDist < spatialTolerance + bestSpatialDist && angleDelta < bestAngleDist)
{
bestAngleDist = angleDelta;
oTranslation = mRelativeTranslation[id];
oRotation = mRelativeRotation[id];
oAbsRotation = mAbsoluteRotation[id];
}
}
}
__host__ void Wiggle::transformObj(
WFObject & aObj,
unsigned int aObjId,
const float3 & aObjCenter,
const float3 & aTranslation,
const quaternion4f & aRotation)
{
thrust::host_vector<unsigned int> processed(aObj.getNumVertices(), 0u);
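//Track transformed vertices so vertices shared by several faces are moved only once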
for (int faceId = aObj.objects[aObjId].x; faceId < aObj.objects[aObjId].y; ++faceId)
{
WFObject::Face face = aObj.faces[faceId];
size_t vtxId1 = aObj.faces[faceId].vert1;
size_t vtxId2 = aObj.faces[faceId].vert2;
size_t vtxId3 = aObj.faces[faceId].vert3;
if (processed[vtxId1] == 0u)
{
processed[vtxId1] = 1u;
float3 vtx = aObj.vertices[vtxId1];
aObj.vertices[vtxId1] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
if (processed[vtxId2] == 0u)
{
processed[vtxId2] = 1u;
float3 vtx = aObj.vertices[vtxId2];
aObj.vertices[vtxId2] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
if (processed[vtxId3] == 0u)
{
processed[vtxId3] = 1u;
float3 vtx = aObj.vertices[vtxId3];
aObj.vertices[vtxId3] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
}
} | ff5efb2f692b62301e01184a1672727de32d7609.cu | #include "pch.h"
#include "Wiggle.h"
#include <deque>
#include "WFObjUtils.h"
#include "SVD.h"
#include "DebugUtils.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
class LocalCoordsEstimator
{
static const bool USE_PCA = false;
public:
uint2* vertexRanges;
float3* vertexBuffer;
double* tmpCovMatrix;
double* tmpDiagonalW;
double* tmpMatrixV;
double* tmpVecRV;
float3* outTranslation;
quaternion4f* outRotation;
LocalCoordsEstimator(
uint2* aRanges,
float3* aBuffer,
double* aCovMatrix,
double* aDiagonalW,
double* aMatrixV,
double* aVecRV,
float3* aOutTranslation,
quaternion4f* aOutRot
) :
vertexRanges(aRanges),
vertexBuffer(aBuffer),
tmpCovMatrix(aCovMatrix),
tmpDiagonalW(aDiagonalW),
tmpMatrixV(aMatrixV),
tmpVecRV(aVecRV),
outTranslation(aOutTranslation),
outRotation(aOutRot)
{}
__host__ __device__ void operator()(const size_t& aId)
{
const unsigned int objId = (unsigned)aId;
//Compute the mean of the vertex locations
float3 center = make_float3(0.f, 0.f, 0.f);
uint2 vtxRange = vertexRanges[objId];
unsigned int vtxCount = vtxRange.y - vtxRange.x;
float numPoints = (float)vtxCount;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
center += vertexBuffer[vtxRange.x + vtxId];
}
center /= numPoints;
outTranslation[aId] = center;
//Find the vertex furthest away from the center
float3 vtx0 = center;
float dist0 = 0.f;
float count = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = vec - center;
const float distSQR = dot(delta, delta);
if (distSQR > dist0 && distSQR - dist0 > 0.001f * dist0)
{
vtx0 = vec;
dist0 = distSQR;
count = 1.f;
}
else if (fabsf(dist0 - distSQR) < 0.001f * dist0)
{
vtx0 += vec;
count += 1.f;
}
}
if(count > 1.f)
vtx0 /= count;
count = 0.f;
//Find the other end of the diameter
float3 vtx1 = vtx0;
float diameter = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = vec - vtx0;
const float distSQR = dot(delta, delta);
if (distSQR > diameter && distSQR - diameter > 0.001f * diameter)
{
vtx1 = vec;
diameter = distSQR;
count = 1.f;
}
else if (fabsf(diameter - distSQR) < 0.001f * diameter)
{
vtx1 += vec;
count += 1.f;
}
}
if(count > 1.f)
vtx1 /= count;
const float3 dir0 = ~(vtx1 - vtx0);
//Find the vertex furthest away from the diameter
float3 vtx2 = vtx0;
float dist2 = 0.f;
count = 0.f;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
const float3 vec = vertexBuffer[vtxRange.x + vtxId];
const float3 delta = cross(dir0, vec - vtx0);
const float distSQR = dot(delta, delta);
const float distCenterSQR = dot(vec - center, vec - center);
if (distSQR >= dist2 && distSQR - dist2 > 0.01f * dist2)
{
vtx2 = vec;
dist2 = distSQR;
count = 1.f;
}
else if (fabsf(dist2 - distSQR) < 0.01f * dist2)
{
vtx2 += vec;
count += 1.f;
}
}
if (count > 1.f)
vtx2 /= count;
//vtx0 = vertexBuffer[vtxRange.x + 0];
//vtx1 = vertexBuffer[vtxRange.x + 1];
//vtx2 = vertexBuffer[vtxRange.x + 2];
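//Gram-Schmidt: dir1 = normalized component of (vtx2 - vtx0) orthogonal to dir0; dir2 completes a right-handed orthonormal frame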
const float3 dir1 = ~((vtx2 - vtx0) - dir0 * dot(vtx2 - vtx0, dir0));
const float3 dir2 = ~cross(dir0, dir1);
float rotDet = determinant(
dir0.x, dir1.x, dir2.x,
dir0.y, dir1.y, dir2.y,
dir0.z, dir1.z, dir2.z
);
outRotation[aId] = quaternion4f(
dir0.x, dir1.x, dir2.x,
dir0.y, dir1.y, dir2.y,
dir0.z, dir1.z, dir2.z
);
if (USE_PCA)
{
//TODO: covMat currently is transpose(X)*X. We need SVD(X) instead
//Compute covariance matrix
double* covMat = tmpCovMatrix + aId * 3;
for (unsigned int vtxId = 0; vtxId < vtxCount; ++vtxId)
{
float3 vec1 = vertexBuffer[vtxRange.x + vtxId] - center;
covMat[0 * 3 + 0] += (double)vec1.x * vec1.x;
covMat[1 * 3 + 0] += (double)vec1.y * vec1.x;
covMat[2 * 3 + 0] += (double)vec1.z * vec1.x;
covMat[0 * 3 + 1] += (double)vec1.x * vec1.y;
covMat[1 * 3 + 1] += (double)vec1.y * vec1.y;
covMat[2 * 3 + 1] += (double)vec1.z * vec1.y;
covMat[0 * 3 + 2] += (double)vec1.x * vec1.z;
covMat[1 * 3 + 2] += (double)vec1.y * vec1.z;
covMat[2 * 3 + 2] += (double)vec1.z * vec1.z;
}
//Singular Value Decomposition
double* diag = tmpDiagonalW + aId * 3;
double* vMat = tmpMatrixV + aId * 3 * 3;
double* tmp = tmpVecRV + aId * 3;
svd::svdcmp(covMat, 3, 3, diag, vMat, tmp);
const float3 col0 = make_float3((float)vMat[0], (float)vMat[1], (float)vMat[2]);
const float3 col1 = make_float3((float)vMat[3], (float)vMat[4], (float)vMat[5]);
const float3 col2 = make_float3((float)vMat[6], (float)vMat[7], (float)vMat[8]);
float rotDet = determinant(
col0.x, col1.x, col2.x,
col0.y, col1.y, col2.y,
col0.z, col1.z, col2.z
);
if (rotDet < 0.f)
{
vMat[0] = -vMat[0];
vMat[1] = -vMat[1];
vMat[2] = -vMat[2];
rotDet = -rotDet;
}
if (fabsf(rotDet - 1.0f) <= 0.01f)
{
quaternion4f rotation(
col0.x, col1.x, col2.x,
col0.y, col1.y, col2.y,
col0.z, col1.z, col2.z
);
outRotation[aId] = ~rotation;
}
}
}
};
class TransformationExtractor
{
public:
thrust::device_ptr<unsigned int> nodeTypes;
thrust::device_ptr<unsigned int> outNeighborTypeKeys;
thrust::device_ptr<unsigned int> outNeighborTypeVals;
thrust::device_ptr<float3> translation;
thrust::device_ptr<quaternion4f> rotation;
thrust::device_ptr<float3> outTranslation;
thrust::device_ptr<quaternion4f> outRotation;
thrust::device_ptr<quaternion4f> outRotationAbs;
TransformationExtractor(
thrust::device_ptr<unsigned int> aNodeTypes,
thrust::device_ptr<unsigned int> aOutNbrTypeKeys,
thrust::device_ptr<unsigned int> aOutNbrTypeVals,
thrust::device_ptr<float3> aTranslation,
thrust::device_ptr<quaternion4f> aRotation,
thrust::device_ptr<float3> aOutTranslation,
thrust::device_ptr<quaternion4f> aOutRotation,
thrust::device_ptr<quaternion4f> aOutRotationAbs
) :
nodeTypes(aNodeTypes),
outNeighborTypeKeys(aOutNbrTypeKeys),
outNeighborTypeVals(aOutNbrTypeVals),
translation(aTranslation),
rotation(aRotation),
outTranslation(aOutTranslation),
outRotation(aOutRotation),
outRotationAbs(aOutRotationAbs)
{}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
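//For the directed edge (nodeId1 -> nodeId2) record the node-type pair, node 2's translation/rotation relative to node 1's local frame, and node 1's inverse absolute rotation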
const unsigned int nodeId1 = thrust::get<0>(t);
const unsigned int nodeId2 = thrust::get<1>(t);
const unsigned int outId = (unsigned)thrust::get<2>(t);
outNeighborTypeKeys[outId] = nodeTypes[nodeId1];
outNeighborTypeVals[outId] = nodeTypes[nodeId2];
quaternion4f rot1 = rotation[nodeId1];
outTranslation[outId] = transformVec(rot1.conjugate(), translation[nodeId2] - translation[nodeId1]);
quaternion4f rot2 = rotation[nodeId2];
outRotation[outId] = rot2.conjugate() * rot1;
outRotationAbs[outId] = rot1.conjugate();
}
};
__host__ void Wiggle::init(WFObject & aObj, Graph & aGraph)
{
seed = (unsigned int)std::chrono::system_clock::now().time_since_epoch().count();
float3 minBound, maxBound;
ObjectBoundsExporter()(aObj, minBound, maxBound);
spatialTolerance = std::max(0.01f * len(maxBound - minBound), spatialTolerance);
//Unpack and upload the vertex buffer
thrust::host_vector<uint2> vertexRangesHost;
thrust::host_vector<float3> vertexBufferHost;
VertexBufferUnpacker unpackVertices;
unpackVertices(aObj, vertexRangesHost, vertexBufferHost);
thrust::device_vector<uint2> vertexRangesDevice(vertexRangesHost);
thrust::device_vector<float3> vertexBufferDevice(vertexBufferHost);
//#ifdef _DEBUG
// outputDeviceVector("vertex ranges: ", vertexRangesDevice);
// outputDeviceVector("vertex buffer: ", vertexBufferDevice);
//#endif
//Compute a local coordinate system for each object (PCA optional via USE_PCA)
thrust::device_vector<float3> outTranslation(aObj.getNumObjects());
thrust::device_vector<quaternion4f> outRotation(aObj.getNumObjects());
thrust::device_vector<double> tmpCovMatrix(aObj.getNumObjects() * 3 * 3, 0.f);
thrust::device_vector<double> tmpDiagonalW(aObj.getNumObjects() * 3);
thrust::device_vector<double> tmpMatrixV(aObj.getNumObjects() * 3 * 3);
thrust::device_vector<double> tmpVecRV(aObj.getNumObjects() * 3);
LocalCoordsEstimator estimateT(
thrust::raw_pointer_cast(vertexRangesDevice.data()),
thrust::raw_pointer_cast(vertexBufferDevice.data()),
thrust::raw_pointer_cast(tmpCovMatrix.data()),
thrust::raw_pointer_cast(tmpDiagonalW.data()),
thrust::raw_pointer_cast(tmpMatrixV.data()),
thrust::raw_pointer_cast(tmpVecRV.data()),
thrust::raw_pointer_cast(outTranslation.data()),
thrust::raw_pointer_cast(outRotation.data())
);
thrust::counting_iterator<size_t> first(0u);
thrust::counting_iterator<size_t> last(aObj.getNumObjects());
thrust::for_each(first, last, estimateT);
//#ifdef _DEBUG
// outputDeviceVector("translations: ", outTranslation);
// outputDeviceVector("rotations: ", outRotation);
//#endif
//Extract and upload node type information
thrust::host_vector<unsigned int> nodeTypesHost(aGraph.numNodes(), (unsigned int)aObj.materials.size());
for (size_t nodeId = 0; nodeId < aObj.objects.size(); ++nodeId)
{
size_t faceId = aObj.objects[nodeId].x;
size_t materialId = aObj.faces[faceId].material;
nodeTypesHost[nodeId] = (unsigned int)materialId;
}
thrust::device_vector<unsigned int> nodeTypes(nodeTypesHost);
thrust::device_vector<unsigned int> neighborTypeKeys(aGraph.numEdges() * 2u);
thrust::device_vector<unsigned int> neighborTypeVals(aGraph.numEdges() * 2u);
thrust::device_vector<float3> relativeTranslation(aGraph.numEdges() * 2u);
thrust::device_vector<quaternion4f> relativeRotation(aGraph.numEdges() * 2u);
thrust::device_vector<quaternion4f> absoluteRotation(aGraph.numEdges() * 2u);
TransformationExtractor extractRelativeT(
nodeTypes.data(),
neighborTypeKeys.data(),
neighborTypeVals.data(),
outTranslation.data(),
outRotation.data(),
relativeTranslation.data(),
relativeRotation.data(),
absoluteRotation.data()
);
thrust::counting_iterator<size_t> lastEdge(aGraph.numEdges() * 2u);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(aGraph.adjacencyKeys.begin(), aGraph.adjacencyVals.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(aGraph.adjacencyKeys.end(), aGraph.adjacencyVals.end(), lastEdge)),
extractRelativeT);
if(mNeighborTypeKeys.size() == 0u)
{
//first call of init
mNeighborTypeKeys = thrust::host_vector<unsigned int>(neighborTypeKeys);
mNeighborTypeVals = thrust::host_vector<unsigned int>(neighborTypeVals);
mRelativeTranslation = thrust::host_vector<float3>(relativeTranslation);
mRelativeRotation = thrust::host_vector<quaternion4f>(relativeRotation);
mAbsoluteRotation = thrust::host_vector<quaternion4f>(absoluteRotation);
}
else
{
//init already called, append new data
size_t oldCount = mNeighborTypeKeys.size();
mNeighborTypeKeys.resize(oldCount + neighborTypeKeys.size());
mNeighborTypeVals.resize(oldCount + neighborTypeVals.size());
mRelativeTranslation.resize(oldCount + relativeTranslation.size());
mRelativeRotation.resize(oldCount + relativeRotation.size());
mAbsoluteRotation.resize(oldCount + absoluteRotation.size());
thrust::copy(neighborTypeKeys.begin(), neighborTypeKeys.end(), mNeighborTypeKeys.begin() + oldCount);
thrust::copy(neighborTypeVals.begin(), neighborTypeVals.end(), mNeighborTypeVals.begin() + oldCount);
thrust::copy(relativeTranslation.begin(), relativeTranslation.end(), mRelativeTranslation.begin() + oldCount);
thrust::copy(relativeRotation.begin(), relativeRotation.end(), mRelativeRotation.begin() + oldCount);
thrust::copy(absoluteRotation.begin(), absoluteRotation.end(), mAbsoluteRotation.begin() + oldCount);
}
//sort by node type
thrust::sort_by_key(
mNeighborTypeKeys.begin(),
mNeighborTypeKeys.end(),
thrust::make_zip_iterator(thrust::make_tuple(mNeighborTypeVals.begin(), mRelativeTranslation.begin(), mRelativeRotation.begin(), mAbsoluteRotation.begin()))
);
//setup search intervals for each node type
mIntervals.resize(aObj.materials.size() + 1u, 0u);
for (size_t i = 0u; i < mNeighborTypeKeys.size() - 1u; ++i)
{
if (mNeighborTypeKeys[i] < mNeighborTypeKeys[i + 1u])
{
mIntervals[mNeighborTypeKeys[i] + 1] = (unsigned)i + 1u;
}
}
//last element
if (mNeighborTypeKeys.size() > 0u)
mIntervals[mNeighborTypeKeys[mNeighborTypeKeys.size() - 1u] + 1] = (unsigned)mNeighborTypeKeys.size();
//fill gaps due to missing node types
for (size_t i = 1u; i < mIntervals.size(); ++i)
{
mIntervals[i] = std::max(mIntervals[i - 1u], mIntervals[i]);
}
#ifdef _DEBUG
outputHostVector("translations: ", mRelativeTranslation);
outputHostVector("rotations: ", mRelativeRotation);
#endif
}
__host__ void Wiggle::fixRelativeTransformations(WFObject & aObj, Graph & aGraph)
{
numCorrections = 0u;
size_t numNodes = aObj.objects.size();
thrust::host_vector<unsigned int> visited(numNodes, 0u);
thrust::host_vector<unsigned int> intervalsHost(aGraph.intervals);
thrust::host_vector<unsigned int> adjacencyValsHost(aGraph.adjacencyVals);
//Extract and upload node type information
thrust::host_vector<unsigned int> nodeTypesHost(aGraph.numNodes(), (unsigned int)aObj.materials.size());
for (size_t nodeId = 0; nodeId < aObj.objects.size(); ++nodeId)
{
size_t faceId = aObj.objects[nodeId].x;
size_t materialId = aObj.faces[faceId].material;
nodeTypesHost[nodeId] = (unsigned int)materialId;
}
if (seedNodeId >= (unsigned int)numNodes)
{
std::default_random_engine generator(seed);
std::uniform_int_distribution<unsigned int> distribution(0u, (unsigned int)numNodes - 1u);
seedNodeId = distribution(generator);
}
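//Breadth-first traversal from the seed node; each node's neighbors are corrected relative to it before being enqueued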
std::deque<unsigned int> frontier;
frontier.push_back(seedNodeId);
visited[seedNodeId] = 1u;
while (!frontier.empty())
{
const unsigned int nodeId = frontier.front();
frontier.pop_front();
processNeighbors(
aObj,
nodeId,
visited,
intervalsHost,
adjacencyValsHost,
nodeTypesHost);
for (unsigned int nbrId = intervalsHost[nodeId]; nbrId < intervalsHost[nodeId + 1]; ++nbrId)
{
const unsigned int nodeId = adjacencyValsHost[nbrId];
if (visited[nodeId] == 0u)
{
frontier.push_back(nodeId);
visited[nodeId] = 1u;
}
}
}
}
__host__ void Wiggle::processNeighbors(
WFObject& aObj,
unsigned int aObjId,
thrust::host_vector<unsigned int>& visited,
thrust::host_vector<unsigned int>& intervalsHost,
thrust::host_vector<unsigned int>& adjacencyValsHost,
thrust::host_vector<unsigned int>& nodeTypeIds)
{
const unsigned int nbrCount = intervalsHost[aObjId + 1u] - intervalsHost[aObjId];
if (nbrCount == 0)
return;
const unsigned int nodeCount = nbrCount + 1u;
thrust::host_vector<unsigned int> nodeIds(nodeCount, aObjId);
thrust::copy(adjacencyValsHost.begin() + intervalsHost[aObjId], adjacencyValsHost.begin() + intervalsHost[aObjId + 1], nodeIds.begin() + 1u);
thrust::host_vector<float3> vertexBufferHost;
thrust::host_vector<uint2> vtxRanges;
VertexBufferUnpacker unpackVertices;
unpackVertices(aObj, nodeIds, vtxRanges, vertexBufferHost);
//Use PCA to compute a local coordinate system for each object
thrust::host_vector<float3> translations(nodeCount);
thrust::host_vector<quaternion4f> rotations(nodeCount);
thrust::host_vector<double> tmpCovMatrix(nodeCount * 3 * 3, 0.f);
thrust::host_vector<double> tmpDiagonalW(nodeCount * 3);
thrust::host_vector<double> tmpMatrixV(nodeCount * 3 * 3);
thrust::host_vector<double> tmpVecRV(nodeCount * 3);
LocalCoordsEstimator estimateT(
thrust::raw_pointer_cast(vtxRanges.data()),
thrust::raw_pointer_cast(vertexBufferHost.data()),
thrust::raw_pointer_cast(tmpCovMatrix.data()),
thrust::raw_pointer_cast(tmpDiagonalW.data()),
thrust::raw_pointer_cast(tmpMatrixV.data()),
thrust::raw_pointer_cast(tmpVecRV.data()),
thrust::raw_pointer_cast(translations.data()),
thrust::raw_pointer_cast(rotations.data())
);
//thrust::counting_iterator<size_t> first(0u);
//thrust::counting_iterator<size_t> last(nodeCount);
//thrust::for_each(first, last, estimateT);
for (unsigned int i = 0u; i < nodeCount; ++i)
{
estimateT(i);
}
if (debugOutputLocalFrames)
{
transformObj(aObj, nodeIds[0], translations[0], make_float3(0.f, 0.f, 0.f), rotations[0].conjugate());
return;
}
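//for each not-yet-visited neighbor, compare its observed relative transform (expressed in this node's
//PCA frame) against the stored examples; when the best match deviates by at least angleTolerance,
//nudge the neighbor part of the way toward that example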
const unsigned int nodeId1 = nodeIds[0];
for (unsigned int i = 1; i < nodeIds.size(); i++)
{
const unsigned int nodeId2 = nodeIds[i];
if (visited[nodeId2])
continue;
const unsigned int typeId1 = nodeTypeIds[nodeId1];
const unsigned int typeId2 = nodeTypeIds[nodeId2];
quaternion4f rot = rotations[0];
float3 relativeT = transformVec(rot.conjugate(), translations[i] - translations[0]);
quaternion4f relativeR = rotations[i].conjugate() * rot;
float3 bestT = relativeT;
quaternion4f bestR = relativeR;
quaternion4f bestA = relativeR;
findBestMatch(typeId1, typeId2, relativeT, relativeR, bestT, bestR, bestA);
const float angleDelta = fabsf(fabsf((bestR * relativeR.conjugate()).w) - 1.f);
if (angleDelta < angleTolerance)
continue;
float3 translateDelta = (0.25f) * transformVec(rot, bestT - relativeT);
transformObj(aObj, nodeId2, translations[i], translateDelta, rotations[i] * bestR * rot.conjugate());
++numCorrections;
}
}
__host__ void Wiggle::findBestMatch(
unsigned int aTypeId1,
unsigned int aTypeId2,
const float3& aTranslation,
const quaternion4f& aRotation,
float3& oTranslation,
quaternion4f& oRotation,
quaternion4f& oAbsRotation)
{
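//two-pass search over the stored (aTypeId1, aTypeId2) examples: the first pass finds the smallest
//translation distance, the second picks, among examples within spatialTolerance of that distance,
//the one whose stored rotation is closest to aRotation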
float bestSpatialDist = FLT_MAX;
for (unsigned int id = mIntervals[aTypeId1]; id < mIntervals[aTypeId1 + 1]; id++)
{
if (mNeighborTypeVals[id] != aTypeId2)
continue;
const float3 delta = mRelativeTranslation[id] - aTranslation;
const float currentSpatialDist = len(delta);
if (currentSpatialDist < bestSpatialDist)
{
bestSpatialDist = currentSpatialDist;
}
}
float bestAngleDist = FLT_MAX;
for (unsigned int id = mIntervals[aTypeId1]; id < mIntervals[aTypeId1 + 1]; id++)
{
if (mNeighborTypeVals[id] != aTypeId2)
continue;
const float3 delta = mRelativeTranslation[id] - aTranslation;
const float currentSpatialDist = len(delta);
const float angleDelta = fabsf(fabsf((aRotation * mRelativeRotation[id].conjugate()).w) - 1.f);
if (currentSpatialDist < spatialTolerance + bestSpatialDist && angleDelta < bestAngleDist)
{
bestAngleDist = angleDelta;
oTranslation = mRelativeTranslation[id];
oRotation = mRelativeRotation[id];
oAbsRotation = mAbsoluteRotation[id];
}
}
}
__host__ void Wiggle::transformObj(
WFObject & aObj,
unsigned int aObjId,
const float3 & aObjCenter,
const float3 & aTranslation,
const quaternion4f & aRotation)
{
thrust::host_vector<unsigned int> processed(aObj.getNumVertices(), 0u);
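//vertices can be shared by several faces; the processed flags ensure each vertex is rotated
//about aObjCenter and translated exactly once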
for (int faceId = aObj.objects[aObjId].x; faceId < aObj.objects[aObjId].y; ++faceId)
{
WFObject::Face face = aObj.faces[faceId];
size_t vtxId1 = aObj.faces[faceId].vert1;
size_t vtxId2 = aObj.faces[faceId].vert2;
size_t vtxId3 = aObj.faces[faceId].vert3;
if (processed[vtxId1] == 0u)
{
processed[vtxId1] = 1u;
float3 vtx = aObj.vertices[vtxId1];
aObj.vertices[vtxId1] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
if (processed[vtxId2] == 0u)
{
processed[vtxId2] = 1u;
float3 vtx = aObj.vertices[vtxId2];
aObj.vertices[vtxId2] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
if (processed[vtxId3] == 0u)
{
processed[vtxId3] = 1u;
float3 vtx = aObj.vertices[vtxId3];
aObj.vertices[vtxId3] = transformVec(aRotation, vtx - aObjCenter) + aObjCenter + aTranslation;
}
}
} |
d10814fa1609fb3f7ac2e6efd4ebb7e0c9d5bb2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/VolumetricUpSamplingNearest.hip"
#else
#include <THHUNN/common.h>
static inline void THNN_(VolumetricUpSamplingNearest_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputDepth, int inputHeight, int inputWidth,
int outputDepth, int outputHeight, int outputWidth) {
THArgCheck(inputDepth > 0 && inputHeight > 0 && inputWidth > 0
&& outputDepth > 0 && outputHeight > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (D: %d, H: %d, W: %d) output (D: %d, H: %d, W: %d)",
inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, THTensor_nDimensionLegacyAll(input) == 5, 2, input,
"5D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 5, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 5, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 5, 2, outputDepth);
THCUNN_check_dim_size(state, gradOutput, 5, 3, outputHeight);
THCUNN_check_dim_size(state, gradOutput, 5, 4, outputWidth);
}
}
void THNN_(VolumetricUpSamplingNearest_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputDepth,
int outputHeight,
int outputWidth)
{
THCUNN_assertSameGPU(state, 2, input, output);
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputDepth = THCTensor_(size)(state, input, 2);
int inputHeight = THCTensor_(size)(state, input, 3);
int inputWidth = THCTensor_(size)(state, input, 4);
THNN_(VolumetricUpSamplingNearest_shapeCheck)(state, input, NULL, nbatch, channels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 &&
outputDepth > 0 && outputHeight > 0 && outputWidth > 0);
THCTensor_(resize5d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputDepth,
outputHeight,
outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<scalar_t, 5> idata = toDeviceTensor<scalar_t, 5>(state, input);
THCDeviceTensor<scalar_t, 5> odata = toDeviceTensor<scalar_t, 5>(state, output);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( nearest_neighbor_5d_kernel<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads),
0, stream, num_kernels, idata, odata);
THCudaCheck(hipGetLastError());
}
void THNN_(VolumetricUpSamplingNearest_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputDepth,
int inputHeight,
int inputWidth,
int outputDepth,
int outputHeight,
int outputWidth)
{
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THNN_(VolumetricUpSamplingNearest_shapeCheck)(state, NULL, gradOutput, nbatch, nchannels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<scalar_t, 5> data1 = toDeviceTensor<scalar_t, 5>(state, gradInput);
THCDeviceTensor<scalar_t, 5> data2 = toDeviceTensor<scalar_t, 5>(state, gradOutput);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( nearest_neighbor_5d_kernel_backward<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)),
dim3( num_threads), 0, stream, num_kernels, data1, data2);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
| d10814fa1609fb3f7ac2e6efd4ebb7e0c9d5bb2c.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/VolumetricUpSamplingNearest.cu"
#else
#include <THCUNN/common.h>
static inline void THNN_(VolumetricUpSamplingNearest_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputDepth, int inputHeight, int inputWidth,
int outputDepth, int outputHeight, int outputWidth) {
THArgCheck(inputDepth > 0 && inputHeight > 0 && inputWidth > 0
&& outputDepth > 0 && outputHeight > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (D: %d, H: %d, W: %d) output (D: %d, H: %d, W: %d)",
inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, THTensor_nDimensionLegacyAll(input) == 5, 2, input,
"5D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 5, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 5, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 5, 2, outputDepth);
THCUNN_check_dim_size(state, gradOutput, 5, 3, outputHeight);
THCUNN_check_dim_size(state, gradOutput, 5, 4, outputWidth);
}
}
void THNN_(VolumetricUpSamplingNearest_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputDepth,
int outputHeight,
int outputWidth)
{
THCUNN_assertSameGPU(state, 2, input, output);
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputDepth = THCTensor_(size)(state, input, 2);
int inputHeight = THCTensor_(size)(state, input, 3);
int inputWidth = THCTensor_(size)(state, input, 4);
THNN_(VolumetricUpSamplingNearest_shapeCheck)(state, input, NULL, nbatch, channels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 &&
outputDepth > 0 && outputHeight > 0 && outputWidth > 0);
THCTensor_(resize5d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputDepth,
outputHeight,
outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<scalar_t, 5> idata = toDeviceTensor<scalar_t, 5>(state, input);
THCDeviceTensor<scalar_t, 5> odata = toDeviceTensor<scalar_t, 5>(state, output);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
nearest_neighbor_5d_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads,
0, stream>>>(num_kernels, idata, odata);
THCudaCheck(cudaGetLastError());
}
void THNN_(VolumetricUpSamplingNearest_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputDepth,
int inputHeight,
int inputWidth,
int outputDepth,
int outputHeight,
int outputWidth)
{
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THNN_(VolumetricUpSamplingNearest_shapeCheck)(state, NULL, gradOutput, nbatch, nchannels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<scalar_t, 5> data1 = toDeviceTensor<scalar_t, 5>(state, gradInput);
THCDeviceTensor<scalar_t, 5> data2 = toDeviceTensor<scalar_t, 5>(state, gradOutput);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
nearest_neighbor_5d_kernel_backward<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads),
num_threads, 0, stream>>>(num_kernels, data1, data2);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
|
10dbea94a5bade3d540bd373c3656017c353ee72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// // Predicts the surface, i.e. performs raycasting
// // This is CUDA code; compile with nvcc
// // Author: Christian Diller, [email protected]
// #include "include/common.h"
// using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
// namespace kinectfusion {
// namespace internal {
// namespace cuda {
// __device__ __forceinline__
// float interpolate_trilinearly(const Vec3fda& point, const PtrStepSz<short2>& volume,
// const int3& volume_size, const float voxel_scale)
// {
// Vec3ida point_in_grid = point.cast<int>();
// const float vx = (static_cast<float>(point_in_grid.x()) + 0.5f);
// const float vy = (static_cast<float>(point_in_grid.y()) + 0.5f);
// const float vz = (static_cast<float>(point_in_grid.z()) + 0.5f);
// point_in_grid.x() = (point.x() < vx) ? (point_in_grid.x() - 1) : point_in_grid.x();
// point_in_grid.y() = (point.y() < vy) ? (point_in_grid.y() - 1) : point_in_grid.y();
// point_in_grid.z() = (point.z() < vz) ? (point_in_grid.z() - 1) : point_in_grid.z();
// const float a = (point.x() - (static_cast<float>(point_in_grid.x()) + 0.5f));
// const float b = (point.y() - (static_cast<float>(point_in_grid.y()) + 0.5f));
// const float c = (point.z() - (static_cast<float>(point_in_grid.z()) + 0.5f));
// return static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * (1 - b) * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * (1 - b) * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * b * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * b * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * (1 - b) * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * (1 - b) * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * b * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * b * c;
// }
// __device__ __forceinline__
// float get_min_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
// {
// float txmin = ((direction.x() > 0 ? 0.f : volume_max.x) - origin.x()) / direction.x();
// float tymin = ((direction.y() > 0 ? 0.f : volume_max.y) - origin.y()) / direction.y();
// float tzmin = ((direction.z() > 0 ? 0.f : volume_max.z) - origin.z()) / direction.z();
// return fmax(fmax(txmin, tymin), tzmin);
// }
// __device__ __forceinline__
// float get_max_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
// {
// float txmax = ((direction.x() > 0 ? volume_max.x : 0.f) - origin.x()) / direction.x();
// float tymax = ((direction.y() > 0 ? volume_max.y : 0.f) - origin.y()) / direction.y();
// float tzmax = ((direction.z() > 0 ? volume_max.z : 0.f) - origin.z()) / direction.z();
// return fmin(fmin(txmax, tymax), tzmax);
// }
// __global__
// void raycast_tsdf_kernel(const PtrStepSz<short2> tsdf_volume, const PtrStepSz<uchar3> color_volume,
// PtrStepSz<float3> model_vertex, PtrStepSz<float3> model_normal,
// PtrStepSz<uchar3> model_color,
// const int3 volume_size, const float voxel_scale,
// const CameraParameters cam_parameters,
// const float truncation_distance,
// const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation,
// const Vec3fda translation)
// {
// const int x = blockIdx.x * blockDim.x + threadIdx.x;
// const int y = blockIdx.y * blockDim.y + threadIdx.y;
// if (x >= model_vertex.cols || y >= model_vertex.rows)
// return;
// const float3 volume_range = make_float3(volume_size.x * voxel_scale,
// volume_size.y * voxel_scale,
// volume_size.z * voxel_scale);
// const Vec3fda pixel_position(
// (x - cam_parameters.principal_x) / cam_parameters.focal_x,
// (y - cam_parameters.principal_y) / cam_parameters.focal_y,
// 1.f);
// Vec3fda ray_direction = (rotation * pixel_position);
// ray_direction.normalize();
// float ray_length = fmax(get_min_time(volume_range, translation, ray_direction), 0.f);
// if (ray_length >= get_max_time(volume_range, translation, ray_direction))
// return;
// ray_length += voxel_scale;
// Vec3fda grid = (translation + (ray_direction * ray_length)) / voxel_scale;
// float tsdf = static_cast<float>(tsdf_volume.ptr(
// __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(grid(0))].x) *
// DIVSHORTMAX;
// const float max_search_length = ray_length + volume_range.x * sqrt(2.f);
// for (; ray_length < max_search_length; ray_length += truncation_distance * 0.5f) {
// grid = ((translation + (ray_direction * (ray_length + truncation_distance * 0.5f))) / voxel_scale);
// if (grid.x() < 1 || grid.x() >= volume_size.x - 1 || grid.y() < 1 ||
// grid.y() >= volume_size.y - 1 ||
// grid.z() < 1 || grid.z() >= volume_size.z - 1)
// continue;
// const float previous_tsdf = tsdf;
// tsdf = static_cast<float>(tsdf_volume.ptr(
// __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(
// grid(0))].x) *
// DIVSHORTMAX;
// if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
// break;
// if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
// const float t_star =
// ray_length - truncation_distance * 0.5f * previous_tsdf / (tsdf - previous_tsdf);
// const auto vertex = translation + ray_direction * t_star;
// const Vec3fda location_in_grid = (vertex / voxel_scale);
// if (location_in_grid.x() < 1 | location_in_grid.x() >= volume_size.x - 1 ||
// location_in_grid.y() < 1 || location_in_grid.y() >= volume_size.y - 1 ||
// location_in_grid.z() < 1 || location_in_grid.z() >= volume_size.z - 1)
// break;
// Vec3fda normal, shifted;
// shifted = location_in_grid;
// shifted.x() += 1;
// if (shifted.x() >= volume_size.x - 1)
// break;
// const float Fx1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.x() -= 1;
// if (shifted.x() < 1)
// break;
// const float Fx2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.x() = (Fx1 - Fx2);
// shifted = location_in_grid;
// shifted.y() += 1;
// if (shifted.y() >= volume_size.y - 1)
// break;
// const float Fy1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.y() -= 1;
// if (shifted.y() < 1)
// break;
// const float Fy2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.y() = (Fy1 - Fy2);
// shifted = location_in_grid;
// shifted.z() += 1;
// if (shifted.z() >= volume_size.z - 1)
// break;
// const float Fz1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.z() -= 1;
// if (shifted.z() < 1)
// break;
// const float Fz2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.z() = (Fz1 - Fz2);
// if (normal.norm() == 0)
// break;
// normal.normalize();
// model_vertex.ptr(y)[x] = make_float3(vertex.x(), vertex.y(), vertex.z());
// model_normal.ptr(y)[x] = make_float3(normal.x(), normal.y(), normal.z());
// auto location_in_grid_int = location_in_grid.cast<int>();
// model_color.ptr(y)[x] = color_volume.ptr(
// location_in_grid_int.z() * volume_size.y +
// location_in_grid_int.y())[location_in_grid_int.x()];
// break;
// }
// }
// }
// void surface_prediction(const VolumeData& volume,
// GpuMat& model_vertex, GpuMat& model_normal, GpuMat& model_color,
// const CameraParameters& cam_parameters,
// const float truncation_distance,
// const Eigen::Matrix4f& pose)
// {
// model_vertex.setTo(0);
// model_normal.setTo(0);
// model_color.setTo(0);
// dim3 threads(32, 32);
// dim3 blocks((model_vertex.cols + threads.x - 1) / threads.x,
// (model_vertex.rows + threads.y - 1) / threads.y);
// // raycast_tsdf_kernel<<<blocks, threads>>>(volume.tsdf_volume, volume.color_volume,
// // model_vertex, model_normal, model_color,
// // volume.volume_size, volume.voxel_scale,
// // cam_parameters,
// // truncation_distance,
// // pose.block(0, 0, 3, 3), pose.block(0, 3, 3, 1));
// hipDeviceSynchronize();
// }
// }
// }
// } | 10dbea94a5bade3d540bd373c3656017c353ee72.cu | // // Predicts the surface, i.e. performs raycasting
// // This is CUDA code; compile with nvcc
// // Author: Christian Diller, [email protected]
// #include "include/common.h"
// using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
// namespace kinectfusion {
// namespace internal {
// namespace cuda {
// __device__ __forceinline__
// float interpolate_trilinearly(const Vec3fda& point, const PtrStepSz<short2>& volume,
// const int3& volume_size, const float voxel_scale)
// {
// Vec3ida point_in_grid = point.cast<int>();
// const float vx = (static_cast<float>(point_in_grid.x()) + 0.5f);
// const float vy = (static_cast<float>(point_in_grid.y()) + 0.5f);
// const float vz = (static_cast<float>(point_in_grid.z()) + 0.5f);
// point_in_grid.x() = (point.x() < vx) ? (point_in_grid.x() - 1) : point_in_grid.x();
// point_in_grid.y() = (point.y() < vy) ? (point_in_grid.y() - 1) : point_in_grid.y();
// point_in_grid.z() = (point.z() < vz) ? (point_in_grid.z() - 1) : point_in_grid.z();
// const float a = (point.x() - (static_cast<float>(point_in_grid.x()) + 0.5f));
// const float b = (point.y() - (static_cast<float>(point_in_grid.y()) + 0.5f));
// const float c = (point.z() - (static_cast<float>(point_in_grid.z()) + 0.5f));
// return static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * (1 - b) * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * (1 - b) * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * b * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX * (1 - a) * b * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * (1 - b) * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * (1 - b) * c +
// static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * b * (1 - c) +
// static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX * a * b * c;
// }
// __device__ __forceinline__
// float get_min_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
// {
// float txmin = ((direction.x() > 0 ? 0.f : volume_max.x) - origin.x()) / direction.x();
// float tymin = ((direction.y() > 0 ? 0.f : volume_max.y) - origin.y()) / direction.y();
// float tzmin = ((direction.z() > 0 ? 0.f : volume_max.z) - origin.z()) / direction.z();
// return fmax(fmax(txmin, tymin), tzmin);
// }
// __device__ __forceinline__
// float get_max_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
// {
// float txmax = ((direction.x() > 0 ? volume_max.x : 0.f) - origin.x()) / direction.x();
// float tymax = ((direction.y() > 0 ? volume_max.y : 0.f) - origin.y()) / direction.y();
// float tzmax = ((direction.z() > 0 ? volume_max.z : 0.f) - origin.z()) / direction.z();
// return fmin(fmin(txmax, tymax), tzmax);
// }
// __global__
// void raycast_tsdf_kernel(const PtrStepSz<short2> tsdf_volume, const PtrStepSz<uchar3> color_volume,
// PtrStepSz<float3> model_vertex, PtrStepSz<float3> model_normal,
// PtrStepSz<uchar3> model_color,
// const int3 volume_size, const float voxel_scale,
// const CameraParameters cam_parameters,
// const float truncation_distance,
// const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation,
// const Vec3fda translation)
// {
// const int x = blockIdx.x * blockDim.x + threadIdx.x;
// const int y = blockIdx.y * blockDim.y + threadIdx.y;
// if (x >= model_vertex.cols || y >= model_vertex.rows)
// return;
// const float3 volume_range = make_float3(volume_size.x * voxel_scale,
// volume_size.y * voxel_scale,
// volume_size.z * voxel_scale);
// const Vec3fda pixel_position(
// (x - cam_parameters.principal_x) / cam_parameters.focal_x,
// (y - cam_parameters.principal_y) / cam_parameters.focal_y,
// 1.f);
// Vec3fda ray_direction = (rotation * pixel_position);
// ray_direction.normalize();
// float ray_length = fmax(get_min_time(volume_range, translation, ray_direction), 0.f);
// if (ray_length >= get_max_time(volume_range, translation, ray_direction))
// return;
// ray_length += voxel_scale;
// Vec3fda grid = (translation + (ray_direction * ray_length)) / voxel_scale;
// float tsdf = static_cast<float>(tsdf_volume.ptr(
// __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(grid(0))].x) *
// DIVSHORTMAX;
// const float max_search_length = ray_length + volume_range.x * sqrt(2.f);
// for (; ray_length < max_search_length; ray_length += truncation_distance * 0.5f) {
// grid = ((translation + (ray_direction * (ray_length + truncation_distance * 0.5f))) / voxel_scale);
// if (grid.x() < 1 || grid.x() >= volume_size.x - 1 || grid.y() < 1 ||
// grid.y() >= volume_size.y - 1 ||
// grid.z() < 1 || grid.z() >= volume_size.z - 1)
// continue;
// const float previous_tsdf = tsdf;
// tsdf = static_cast<float>(tsdf_volume.ptr(
// __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(
// grid(0))].x) *
// DIVSHORTMAX;
// if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
// break;
// if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
// const float t_star =
// ray_length - truncation_distance * 0.5f * previous_tsdf / (tsdf - previous_tsdf);
// const auto vertex = translation + ray_direction * t_star;
// const Vec3fda location_in_grid = (vertex / voxel_scale);
// if (location_in_grid.x() < 1 | location_in_grid.x() >= volume_size.x - 1 ||
// location_in_grid.y() < 1 || location_in_grid.y() >= volume_size.y - 1 ||
// location_in_grid.z() < 1 || location_in_grid.z() >= volume_size.z - 1)
// break;
// Vec3fda normal, shifted;
// shifted = location_in_grid;
// shifted.x() += 1;
// if (shifted.x() >= volume_size.x - 1)
// break;
// const float Fx1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.x() -= 1;
// if (shifted.x() < 1)
// break;
// const float Fx2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.x() = (Fx1 - Fx2);
// shifted = location_in_grid;
// shifted.y() += 1;
// if (shifted.y() >= volume_size.y - 1)
// break;
// const float Fy1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.y() -= 1;
// if (shifted.y() < 1)
// break;
// const float Fy2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.y() = (Fy1 - Fy2);
// shifted = location_in_grid;
// shifted.z() += 1;
// if (shifted.z() >= volume_size.z - 1)
// break;
// const float Fz1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// shifted = location_in_grid;
// shifted.z() -= 1;
// if (shifted.z() < 1)
// break;
// const float Fz2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// normal.z() = (Fz1 - Fz2);
// if (normal.norm() == 0)
// break;
// normal.normalize();
// model_vertex.ptr(y)[x] = make_float3(vertex.x(), vertex.y(), vertex.z());
// model_normal.ptr(y)[x] = make_float3(normal.x(), normal.y(), normal.z());
// auto location_in_grid_int = location_in_grid.cast<int>();
// model_color.ptr(y)[x] = color_volume.ptr(
// location_in_grid_int.z() * volume_size.y +
// location_in_grid_int.y())[location_in_grid_int.x()];
// break;
// }
// }
// }
// void surface_prediction(const VolumeData& volume,
// GpuMat& model_vertex, GpuMat& model_normal, GpuMat& model_color,
// const CameraParameters& cam_parameters,
// const float truncation_distance,
// const Eigen::Matrix4f& pose)
// {
// model_vertex.setTo(0);
// model_normal.setTo(0);
// model_color.setTo(0);
// dim3 threads(32, 32);
// dim3 blocks((model_vertex.cols + threads.x - 1) / threads.x,
// (model_vertex.rows + threads.y - 1) / threads.y);
// // raycast_tsdf_kernel<<<blocks, threads>>>(volume.tsdf_volume, volume.color_volume,
// // model_vertex, model_normal, model_color,
// // volume.volume_size, volume.voxel_scale,
// // cam_parameters,
// // truncation_distance,
// // pose.block(0, 0, 3, 3), pose.block(0, 3, 3, 1));
// cudaThreadSynchronize();
// }
// }
// }
// } |
60753132bd46c7306cdedadaa65116cd41718b09.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive multiply access: only lanes 0-7 of each warp execute the multiply chain (the commented-out lines below are the original addition variant)
if( ((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL((PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with values from rand(); note rand() / RAND_MAX is integer division, so the entries are effectively all zero.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
| 60753132bd46c7306cdedadaa65116cd41718b09.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive multiply access: only lanes 0-7 of each warp execute the multiply chain (the commented-out lines below are the original addition variant)
if( ((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Fills an array with values from rand(); note rand() / RAND_MAX is integer division, so the entries are effectively all zero.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
|
e1e3cd60849e737a71f2ceded4525e45f7ceb3c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "findDiffLabels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *devDiff = NULL;
hipMalloc(&devDiff, XSIZE*YSIZE);
int diffPitchInFloats = 2;
int nPoints = 1;
int nClusters = 1;
int *devClusters = NULL;
hipMalloc(&devClusters, XSIZE*YSIZE);
int *devChanges = NULL;
hipMalloc(&devChanges, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((findDiffLabels), dim3(gridBlock),dim3(threadBlock), 0, 0, devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((findDiffLabels), dim3(gridBlock),dim3(threadBlock), 0, 0, devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((findDiffLabels), dim3(gridBlock),dim3(threadBlock), 0, 0, devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e1e3cd60849e737a71f2ceded4525e45f7ceb3c5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "findDiffLabels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *devDiff = NULL;
cudaMalloc(&devDiff, XSIZE*YSIZE);
int diffPitchInFloats = 2;
int nPoints = 1;
int nClusters = 1;
int *devClusters = NULL;
cudaMalloc(&devClusters, XSIZE*YSIZE);
int *devChanges = NULL;
cudaMalloc(&devChanges, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
findDiffLabels<<<gridBlock,threadBlock>>>(devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
findDiffLabels<<<gridBlock,threadBlock>>>(devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
findDiffLabels<<<gridBlock,threadBlock>>>(devDiff,diffPitchInFloats,nPoints,nClusters,devClusters,devChanges);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
daaeaefbe68339c2607ed65bf3e5665735ff6cde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////
// INCLUDES
////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include "RRT.cuh"
////////////////////////////////////////////
// CUDA KERNELS
////////////////////////////////////////////
/*
* Initializes CUDA RNG
*/
__global__ void RNG_setup_kernel(hiprandState_t *state) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id
hiprand_init(1234, idx, 0, &state[idx]); // using seed 1234 (change to time at a later stage)
}
/*
* Initializes adjacent matrix
*/
__global__ void init_adj_matrix_kernel(int * adjacency_matrix){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
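// Each thread fills one row of the (NUM_THREADS*NUM_BLOCKS)^2 adjacency matrix:
// 0 on the diagonal, 9999 (presumably used as "no edge" / infinity) everywhere else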
for(int i=0; i < NUM_THREADS*NUM_BLOCKS; i++){
int index = idx * NUM_THREADS*NUM_BLOCKS + i;
if(index % (NUM_THREADS*NUM_BLOCKS + 1) == 0){
adjacency_matrix[index] = 0;
}else{
adjacency_matrix[index] = 9999;
//adjacency_matrix[index] = 0;
}
}
}
/*
* Main kernel; Contains RRT algorithm
*/
__global__ void RRT_kernel(hiprandState_t *my_curandstate, int *adjacency_matrix,
double * path_solutions, double * control_solutions, double* tmp) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id
// computing initial state
double start_state[] = { ANG_POS_0_MIN, ANG_VEL_0_MIN, ANG_POS_1_MIN, ANG_VEL_1_MIN }; // initial state; angle position measured from x-axis
start_state[0] += ((idx % GRID_X) * 2 * DELTA_X) + (2 * DELTA_X);
start_state[1] += (((idx / GRID_X) % GRID_Y) * 2 * DELTA_Y) + (2 * DELTA_Y);
start_state[2] += (((idx / (GRID_X*GRID_Y)) % GRID_Z) * 2 * DELTA_Z) + (2 * DELTA_Z);
start_state[3] += (((idx / (GRID_X*GRID_Y*GRID_Z)) % (GRID_W*NUM_BLOCKS)) * 2 * DELTA_W) + (2 * DELTA_W);
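// Each thread owns one cell of a 4-D lattice of initial states (GRID_X x GRID_Y x GRID_Z x GRID_W*NUM_BLOCKS cells),
// spaced 2*DELTA_* apart in each dimension; an RRT is grown locally around that cell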
tmp[4*idx] = start_state[0];
tmp[4*idx+1] = start_state[1];
tmp[4*idx+2] = start_state[2];
tmp[4*idx+3] = start_state[3];
// TODO: automate goal placement around initial state
double end_state[NUM_OF_GOAL_STATES][DIMENSIONS] = {{0}};
int goal_idx;
for(goal_idx = 0; goal_idx < pow(3,DIMENSIONS); goal_idx++)
{
end_state[goal_idx][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X;
end_state[goal_idx][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y;
end_state[goal_idx][2] = start_state[2] + (((goal_idx/(3*3))%3) - 1)*2*DELTA_Z;
end_state[goal_idx][3] = start_state[3] + (((goal_idx/(3*3*3))%3) - 1)*2*DELTA_W;
}
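// Goal regions are the 3^DIMENSIONS lattice offsets surrounding the initial cell (the center offset included)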
//double state_limits[2][2] = { { start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X }, {start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y } }; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s
double state_limits[4][2] = {
{start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X},
{start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y},
{start_state[2] - 3 * DELTA_Z, start_state[2] + 3 * DELTA_Z},
{start_state[3] - 3 * DELTA_W, start_state[3] + 3 * DELTA_W},
}; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s
// discrete control torques available to the planner (the commented-out block below is an alternative 20-value set)
//*
double discrete_control_torques[] = {-20,-10,0,10,20};
//*/
/*
double discrete_control_torques[] = { -1.0000, -0.8947, -0.7895, -0.6842, -0.5789, -0.4737, -0.3684, -0.2632, -0.1579, -0.0526,
1.0000, 0.8947, 0.7895, 0.6842, 0.5789, 0.4737, 0.3684, 0.2632, 0.1579, 0.0526};
//*/
int number_of_discrete_torques = (int) (sizeof(discrete_control_torques) / sizeof(discrete_control_torques[0]));
double time_step = 0.04; // time interval between application of subsequent control torques
// static memory allocation
double random_state[DIMENSIONS]; // stores a state
double next_state[DIMENSIONS];
double RRT_tree[NUM_OF_ITERATIONS][DIMENSIONS]; // stores tree
int x, y;
for (x = 0; x < NUM_OF_ITERATIONS; x++) { // initialize tree to initial state
RRT_tree[x][0] = start_state[0];
RRT_tree[x][1] = start_state[1];
RRT_tree[x][2] = start_state[2];
RRT_tree[x][3] = start_state[3];
}
//int adjMatrix[NUM_THREADS][NUM_THREADS];
//memset(adjMatrix, 0, sizeof(int)*NUM_THREADS*NUM_THREADS);
int parent_state_index[NUM_OF_ITERATIONS]; // stores index of parent state for each state in graph RRT_tree
int control_action_index[NUM_OF_ITERATIONS]; // stores index of control actions in discrete_control_torques (each state will use a control action value in discrete_control_torques)
double u_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH]; // stores sequence of control actions (solution to problem)
double x_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH][DIMENSIONS];
for (y = 0; y < NUM_OF_GOAL_STATES; y++) {
for (x = 0; x < LENGTH_OF_SOLN_PATH; x++) { // initialize tree to initial state
x_path[y][x][0] = 0;
x_path[y][x][1] = 0;
x_path[y][x][2] = 0;
x_path[y][x][3] = 0;
u_path[y][x] = 0;
}
}
int state_index = 0; // stores sequence of states joining initial to goal state
double temp_achievable_states[5][DIMENSIONS]; // stores temporary achievable states from a particular vertex; 5 is the length of discrete_control_torques
double distance_square_values[NUM_OF_ITERATIONS]; // stores distance square values
int goal_index;
int not_found[NUM_OF_GOAL_STATES] = {0};
for(int i=0; i < NUM_OF_GOAL_STATES;i++)
not_found[i] = 1;
int weight = 0;
double k1[4],k2[4],k3[4],k4[4],kTemp[4]; // for RK4 algorithm
// keep growing RRT until goal found or run out of iterations
int iteration;
for (iteration = 1; iteration < NUM_OF_ITERATIONS; iteration++) {
// get random state
random_state[0] = hiprand_uniform(my_curandstate + idx) * (state_limits[0][1] - state_limits[0][0]) + state_limits[0][0];
random_state[1] = hiprand_uniform(my_curandstate + idx) * (state_limits[1][1] - state_limits[1][0]) + state_limits[1][0];
random_state[2] = hiprand_uniform(my_curandstate + idx) * (state_limits[2][1] - state_limits[2][0]) + state_limits[2][0];
random_state[3] = hiprand_uniform(my_curandstate + idx) * (state_limits[3][1] - state_limits[3][0]) + state_limits[3][0];
// find distances between that state point and every vertex in RRT
euclidianDistSquare(random_state, RRT_tree, iteration, distance_square_values);
// select RRT vertex closest to the state point
int nearest_state_index = findMin(distance_square_values, iteration);
// from the closest RRT vertex, compute all the states that can be reached,
// given the pendulum dynamics and available torques
int ui;
for (ui = 0; ui < number_of_discrete_torques; ui++) {
// using RK4 for dynamics
acrobotDynamics(RRT_tree[nearest_state_index],discrete_control_torques[ui],k1);
kTemp[0] = RRT_tree[nearest_state_index][0]+0.5*k1[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+0.5*k1[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+0.5*k1[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+0.5*k1[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k2);
kTemp[0] = RRT_tree[nearest_state_index][0]+0.5*k2[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+0.5*k2[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+0.5*k2[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+0.5*k2[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k3);
kTemp[0] = RRT_tree[nearest_state_index][0]+k3[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+k3[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+k3[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+k3[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k4);
temp_achievable_states[ui][0] = RRT_tree[nearest_state_index][0] + time_step*(1.0/6.0)*(k1[0]+2*k2[0]+2*k3[0]+k4[0]);
temp_achievable_states[ui][1] = RRT_tree[nearest_state_index][1] + time_step*(1.0/6.0)*(k1[1]+2*k2[1]+2*k3[1]+k4[1]);
temp_achievable_states[ui][2] = RRT_tree[nearest_state_index][2] + time_step*(1.0/6.0)*(k1[2]+2*k2[2]+2*k3[2]+k4[2]);
temp_achievable_states[ui][3] = RRT_tree[nearest_state_index][3] + time_step*(1.0/6.0)*(k1[3]+2*k2[3]+2*k3[3]+k4[3]);
}
// select the closest reachable state point
euclidianDistSquare(random_state, temp_achievable_states, number_of_discrete_torques, distance_square_values);
ui = findMin(distance_square_values, number_of_discrete_torques);
random_state[0] = temp_achievable_states[ui][0];
random_state[1] = temp_achievable_states[ui][1];
random_state[2] = temp_achievable_states[ui][2];
random_state[3] = temp_achievable_states[ui][3];
// if angular position is greater than pi rads, wrap around
// (note: this fmod-based wrap only remaps values above +pi; angles below -pi pass through unchanged)
if(random_state[0] > M_PI || random_state[0] < -M_PI)
random_state[0] = fmod((random_state[0]+M_PI), (2*M_PI)) - M_PI;
if(random_state[2] > M_PI || random_state[2] < -M_PI)
random_state[2] = fmod((random_state[2]+M_PI), (2*M_PI)) - M_PI;
// link reachable state point to the nearest vertex in the tree
RRT_tree[iteration][0] = random_state[0];
RRT_tree[iteration][1] = random_state[1];
RRT_tree[iteration][2] = random_state[2];
RRT_tree[iteration][3] = random_state[3];
parent_state_index[iteration] = nearest_state_index;
control_action_index[iteration] = ui;
// if tree has grown near enough to one of the surrounding goal states
// set that particular goal state to 'found'
// save path from initial state to that goal state
for (goal_index = 0; goal_index < NUM_OF_GOAL_STATES; goal_index++) {
if (not_found[goal_index] == 1
&& (random_state[0] <= end_state[goal_index][0] + 0.05)
&& (random_state[0] >= end_state[goal_index][0] - 0.05)) {
if ((random_state[1] <= end_state[goal_index][1] + 0.25)
&& (random_state[1] >= end_state[goal_index][1] - 0.25)) {
not_found[goal_index] = 0;
state_index = iteration;
int length_of_soln = 0;
while (state_index != 0) {
u_path[goal_index][length_of_soln] = discrete_control_torques[control_action_index[state_index]];
x_path[goal_index][length_of_soln][0] = RRT_tree[state_index][0];
x_path[goal_index][length_of_soln][1] = RRT_tree[state_index][1];
x_path[goal_index][length_of_soln][2] = RRT_tree[state_index][2];
x_path[goal_index][length_of_soln][3] = RRT_tree[state_index][3];
length_of_soln++;
state_index = parent_state_index[state_index];
}
}
}
}
}
weight = 1;
// Update adjacency matrix:
// for each goal state surrounding an initial state,
// if the goal state has been reached,
// if tree is growing near border of phase space, check if tree is growing within state space limits
// set respective flag in adjacency matrix to 1 (or to a weight)
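// NOTE: this update appears to predate the 4-D extension above: only the 8 surrounding cells in the
// first two grid dimensions (+/-1 and +/-GRID_X index offsets) are handled, matching the goal test
// that checks just the first two state dimensions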
int k;
for (k = 0; k < 8; k++) {
if (not_found[k] == 0) {
if (k == 0 && idx % GRID_X != 0) {
if (idx + GRID_X - 1 <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X - 1] = weight;
}
} else if (k == 1) {
if (idx + GRID_X <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X] = weight;
}
} else if (k == 2 && (idx + 1) % GRID_X != 0) {
if (idx + GRID_X + 1 <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X + 1] = weight;
}
} else if (k == 3 && idx % GRID_X != 0) {
if (idx - 1 >= 0) { // don't need that line
adjacency_matrix[idx * NUM_THREADS * 8 + idx - 1] = weight;
}
} else if (k == 4 && (idx + 1) % GRID_X != 0) {
if (idx + 1 <= NUM_THREADS * NUM_BLOCKS - 1) { // don't need that line
adjacency_matrix[idx * NUM_THREADS * 8 + idx + 1] = weight;
}
} else if (k == 5 && idx % GRID_X != 0) {
if (idx - GRID_X - 1 >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X - 1] = weight;
}
} else if (k == 6) {
if (idx - GRID_X >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X] = weight;
}
} else if (k == 7 && (idx + 1) % GRID_X != 0) {
if (idx - GRID_X + 1 >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X + 1] = weight;
}
}
}
}
//*/
//* copy path results of algorithm to device results array
int i, j;
int num_of_goals = NUM_OF_GOAL_STATES;
for (j = 0; j < num_of_goals; j++) {
for (i = 0; i < LENGTH_OF_SOLN_PATH; i++) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = x_path[j][i][0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = x_path[j][i][1];
control_solutions[idx * num_of_goals * LENGTH_OF_SOLN_PATH + j * LENGTH_OF_SOLN_PATH + i] = u_path[j][i];
if (not_found[j] == 0) {
if (i == LENGTH_OF_SOLN_PATH - 2) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = start_state[0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = start_state[1];
} else if (i == LENGTH_OF_SOLN_PATH - 1) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = end_state[j][0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = end_state[j][1];
}
}
}
}
//*/
/*
int i;
for (i = 0; i < NUM_RESULTS_PER_THREAD; i++)
result[idx * NUM_RESULTS_PER_THREAD + i] = start_state[i];
//*/
/*
result[idx * NUM_RESULTS_PER_THREAD + 0] = start_state[0];
result[idx * NUM_RESULTS_PER_THREAD + 1] = start_state[1];
//*/
}
////////////////////////////////////////////
// HELPER FUNCTIONS
////////////////////////////////////////////
/*
 * computes the Euclidean distances squared from point A to every point in array B
*/
__device__ void euclidianDistSquare(double* A, double B[][4], int lengthOfB, double* listOfDistSq)
{
int i;
for(i = 0; i < lengthOfB; i++)
listOfDistSq[i] = pow((B[i][0] - A[0]),2) + pow((B[i][1] - A[1]),2) + pow((B[i][2] - A[2]),2) + pow((B[i][3] - A[3]),2);
}
/*
* finds the index of the minimum in an array
*/
__device__ int findMin(double array[], int lengthOfArray) {
int minIndex = 0;
int i;
for (i = 0; i < lengthOfArray; i++) {
if (array[i] < array[minIndex])
minIndex = i;
}
return minIndex;
}
/*
* Computes x_dot of the acrobot, given x and a control input u
*/
__device__ void acrobotDynamics(double* x, double u, double* xd)
{
// acrobot parameters
int m1 = 1;
int m2 = 1;
double l1 = 1;
double l2 = 1;
double lc1 = l1/2;
double lc2 = l2/2;
double Ic1 = (lc1*lc1)/3;
double Ic2 = (lc2*lc2)/3;
double I1 = Ic1+m1*lc1*lc1;
double I2 = Ic2+m2*lc2*lc2;
double b1 = 0.4;
double b2 = 0.4;
double g = 9.8;
double H[2][2] = {{I1 + I2 + m2*l1*l1 + 2*m2*l1*lc2*cos(x[2]),
I2 + m2*l1*lc2*cos(x[2])},
{I2 + m2*l1*lc2*cos(x[2]),
I2}};
double C[2][2] = {{-2*m2*l1*lc2*sin(x[2])*x[3] + b1,
-m2*l1*lc2*sin(x[2])*x[3]},
{m2*l1*lc2*sin(x[2])*x[1],
b2}};
double G[2] = {m1*g*lc1*sin(x[0]) + m2*g*(l1*sin(x[0])+lc2*sin(x[0]+x[2])),
m2*g*lc2*sin(x[0]+x[2])};
double B[2] = {0,u};
double invH[2][2];
matrixInverse(H,invH);
double C_qd[2];
double qd[2] = {x[1],x[3]};
matrixMultiply(C,qd,C_qd);
double temp[2];
temp[0] = B[0] - C_qd[0] - G[0];
temp[1] = B[1] - C_qd[1] - G[1];
double qdd[2];
matrixMultiply(invH,temp,qdd);
xd[0] = x[1];
xd[1] = qdd[0];
xd[2] = x[3];
xd[3] = qdd[1];
}
/*
* Computes the matrix inverse of a 2x2 matrix
*/
__device__ void matrixInverse(double M[2][2], double invM[2][2])
{
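	// note: assumes M is invertible; there is no check for a zero determinant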
double detInv = 1/(M[0][0]*M[1][1] - M[0][1]*M[1][0]);
invM[0][0] = detInv*M[1][1];
invM[0][1] = detInv*M[0][1]*-1;
invM[1][0] = detInv*M[1][0]*-1;
invM[1][1] = detInv*M[0][0];
}
/*
 * Computes the product of a 2x2 matrix and a 2x1 vector
*/
__device__ void matrixMultiply(double A[2][2], double B[2], double C[2])
{
C[0] = A[0][0]*B[0] + A[0][1]*B[1];
C[1] = A[1][0]*B[0] + A[1][1]*B[1];
}
| daaeaefbe68339c2607ed65bf3e5665735ff6cde.cu | ////////////////////////////////////////////
// INCLUDES
////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include "RRT.cuh"
////////////////////////////////////////////
// CUDA KERNELS
////////////////////////////////////////////
/*
* Initializes CUDA RNG
*/
__global__ void RNG_setup_kernel(curandState *state) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id
curand_init(1234, idx, 0, &state[idx]); // using seed 1234 (change to time at a later stage)
}
/*
* Initializes adjacent matrix
*/
__global__ void init_adj_matrix_kernel(int * adjacency_matrix){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=0; i < NUM_THREADS*NUM_BLOCKS; i++){
int index = idx * NUM_THREADS*NUM_BLOCKS + i;
if(index % (NUM_THREADS*NUM_BLOCKS + 1) == 0){
adjacency_matrix[index] = 0;
}else{
adjacency_matrix[index] = 9999;
//adjacency_matrix[index] = 0;
}
}
}
/*
* Main kernel; Contains RRT algorithm
*/
__global__ void RRT_kernel(curandState *my_curandstate, int *adjacency_matrix,
double * path_solutions, double * control_solutions, double* tmp) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id
// computing initial state
double start_state[] = { ANG_POS_0_MIN, ANG_VEL_0_MIN, ANG_POS_1_MIN, ANG_VEL_1_MIN }; // initial state; angle position measured from x-axis
start_state[0] += ((idx % GRID_X) * 2 * DELTA_X) + (2 * DELTA_X);
start_state[1] += (((idx / GRID_X) % GRID_Y) * 2 * DELTA_Y) + (2 * DELTA_Y);
start_state[2] += (((idx / (GRID_X*GRID_Y)) % GRID_Z) * 2 * DELTA_Z) + (2 * DELTA_Z);
start_state[3] += (((idx / (GRID_X*GRID_Y*GRID_Z)) % (GRID_W*NUM_BLOCKS)) * 2 * DELTA_W) + (2 * DELTA_W);
tmp[4*idx] = start_state[0];
tmp[4*idx+1] = start_state[1];
tmp[4*idx+2] = start_state[2];
tmp[4*idx+3] = start_state[3];
// TODO: automate goal placement around initial state
double end_state[NUM_OF_GOAL_STATES][DIMENSIONS] = {{0}};
int goal_idx;
for(goal_idx = 0; goal_idx < pow(3,DIMENSIONS); goal_idx++)
{
end_state[goal_idx][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X;
end_state[goal_idx][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y;
end_state[goal_idx][2] = start_state[2] + (((goal_idx/(3*3))%3) - 1)*2*DELTA_Z;
end_state[goal_idx][3] = start_state[3] + (((goal_idx/(3*3*3))%3) - 1)*2*DELTA_W;
}
//double state_limits[2][2] = { { start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X }, {start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y } }; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s
double state_limits[4][2] = {
{start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X},
{start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y},
{start_state[2] - 3 * DELTA_Z, start_state[2] + 3 * DELTA_Z},
{start_state[3] - 3 * DELTA_W, start_state[3] + 3 * DELTA_W},
}; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s
// control torques to be used: linspace(-5,5,20)
//*
double discrete_control_torques[] = {-20,-10,0,10,20};
//*/
/*
double discrete_control_torques[] = { -1.0000, -0.8947, -0.7895, -0.6842, -0.5789, -0.4737, -0.3684, -0.2632, -0.1579, -0.0526,
1.0000, 0.8947, 0.7895, 0.6842, 0.5789, 0.4737, 0.3684, 0.2632, 0.1579, 0.0526};
//*/
int number_of_discrete_torques = (int) (sizeof(discrete_control_torques) / sizeof(discrete_control_torques[0]));
double time_step = 0.04; // time interval between application of subsequent control torques
// static memory allocation
double random_state[DIMENSIONS]; // stores a state
double next_state[DIMENSIONS];
double RRT_tree[NUM_OF_ITERATIONS][DIMENSIONS]; // stores tree
int x, y;
for (x = 0; x < NUM_OF_ITERATIONS; x++) { // initialize tree to initial state
RRT_tree[x][0] = start_state[0];
RRT_tree[x][1] = start_state[1];
RRT_tree[x][2] = start_state[2];
RRT_tree[x][3] = start_state[3];
}
//int adjMatrix[NUM_THREADS][NUM_THREADS];
//memset(adjMatrix, 0, sizeof(int)*NUM_THREADS*NUM_THREADS);
int parent_state_index[NUM_OF_ITERATIONS]; // stores index of parent state for each state in graph RRT_tree
int control_action_index[NUM_OF_ITERATIONS]; // stores index of control actions in discrete_control_torques (each state will use a control action value in discrete_control_torques)
double u_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH]; // stores sequence of control actions (solution to problem)
double x_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH][DIMENSIONS];
for (y = 0; y < NUM_OF_GOAL_STATES; y++) {
for (x = 0; x < LENGTH_OF_SOLN_PATH; x++) { // initialize tree to initial state
x_path[y][x][0] = 0;
x_path[y][x][1] = 0;
x_path[y][x][2] = 0;
x_path[y][x][3] = 0;
u_path[y][x] = 0;
}
}
int state_index = 0; // stores sequence of states joining initial to goal state
double temp_achievable_states[5][DIMENSIONS]; // stores temporary achievable states from a particular vertex; 20 is length of discrete_control_torques
double distance_square_values[NUM_OF_ITERATIONS]; // stores distance square values
int goal_index;
int not_found[NUM_OF_GOAL_STATES] = {0};
for(int i=0; i < NUM_OF_GOAL_STATES;i++)
not_found[i] = 1;
int weight = 0;
double k1[4],k2[4],k3[4],k4[4],kTemp[4]; // for RK4 algorithm
// keep growing RRT until goal found or run out of iterations
int iteration;
for (iteration = 1; iteration < NUM_OF_ITERATIONS; iteration++) {
// get random state
random_state[0] = curand_uniform(my_curandstate + idx) * (state_limits[0][1] - state_limits[0][0]) + state_limits[0][0];
random_state[1] = curand_uniform(my_curandstate + idx) * (state_limits[1][1] - state_limits[1][0]) + state_limits[1][0];
random_state[2] = curand_uniform(my_curandstate + idx) * (state_limits[2][1] - state_limits[2][0]) + state_limits[2][0];
random_state[3] = curand_uniform(my_curandstate + idx) * (state_limits[3][1] - state_limits[3][0]) + state_limits[3][0];
// find distances between that state point and every vertex in RRT
euclidianDistSquare(random_state, RRT_tree, iteration, distance_square_values);
// select RRT vertex closest to the state point
int nearest_state_index = findMin(distance_square_values, iteration);
// from the closest RRT vertex, compute all the states that can be reached,
// given the pendulum dynamics and available torques
int ui;
for (ui = 0; ui < number_of_discrete_torques; ui++) {
// using RK4 for dynamics
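			// classic 4th-order Runge-Kutta step: k1..k4 are slope estimates at the start, the two
			// midpoints and the end of the interval; the state is advanced by time_step*(k1+2*k2+2*k3+k4)/6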
acrobotDynamics(RRT_tree[nearest_state_index],discrete_control_torques[ui],k1);
kTemp[0] = RRT_tree[nearest_state_index][0]+0.5*k1[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+0.5*k1[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+0.5*k1[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+0.5*k1[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k2);
kTemp[0] = RRT_tree[nearest_state_index][0]+0.5*k2[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+0.5*k2[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+0.5*k2[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+0.5*k2[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k3);
kTemp[0] = RRT_tree[nearest_state_index][0]+k3[0]*time_step;
kTemp[1] = RRT_tree[nearest_state_index][1]+k3[1]*time_step;
kTemp[2] = RRT_tree[nearest_state_index][2]+k3[2]*time_step;
kTemp[3] = RRT_tree[nearest_state_index][3]+k3[3]*time_step;
acrobotDynamics(kTemp,discrete_control_torques[ui],k4);
temp_achievable_states[ui][0] = RRT_tree[nearest_state_index][0] + time_step*(1.0/6.0)*(k1[0]+2*k2[0]+2*k3[0]+k4[0]);
temp_achievable_states[ui][1] = RRT_tree[nearest_state_index][1] + time_step*(1.0/6.0)*(k1[1]+2*k2[1]+2*k3[1]+k4[1]);
temp_achievable_states[ui][2] = RRT_tree[nearest_state_index][2] + time_step*(1.0/6.0)*(k1[2]+2*k2[2]+2*k3[2]+k4[2]);
temp_achievable_states[ui][3] = RRT_tree[nearest_state_index][3] + time_step*(1.0/6.0)*(k1[3]+2*k2[3]+2*k3[3]+k4[3]);
}
// select the closest reachable state point
euclidianDistSquare(random_state, temp_achievable_states, number_of_discrete_torques, distance_square_values);
ui = findMin(distance_square_values, number_of_discrete_torques);
random_state[0] = temp_achievable_states[ui][0];
random_state[1] = temp_achievable_states[ui][1];
random_state[2] = temp_achievable_states[ui][2];
random_state[3] = temp_achievable_states[ui][3];
// if angular position is greater than pi rads, wrap around
		if(random_state[0] > M_PI || random_state[0] < -M_PI)
			random_state[0] = fmod((random_state[0]+M_PI), (2*M_PI)) - M_PI;
		if(random_state[2] > M_PI || random_state[2] < -M_PI)
			random_state[2] = fmod((random_state[2]+M_PI), (2*M_PI)) - M_PI;
// link reachable state point to the nearest vertex in the tree
RRT_tree[iteration][0] = random_state[0];
RRT_tree[iteration][1] = random_state[1];
RRT_tree[iteration][2] = random_state[2];
RRT_tree[iteration][3] = random_state[3];
parent_state_index[iteration] = nearest_state_index;
control_action_index[iteration] = ui;
// if tree has grown near enough to one of the surrounding goal states
// set that particular goal state to 'found'
// save path from initial state to that goal state
for (goal_index = 0; goal_index < NUM_OF_GOAL_STATES; goal_index++) {
if (not_found[goal_index] == 1
&& (random_state[0] <= end_state[goal_index][0] + 0.05)
&& (random_state[0] >= end_state[goal_index][0] - 0.05)) {
if ((random_state[1] <= end_state[goal_index][1] + 0.25)
&& (random_state[1] >= end_state[goal_index][1] - 0.25)) {
not_found[goal_index] = 0;
state_index = iteration;
int length_of_soln = 0;
while (state_index != 0) {
u_path[goal_index][length_of_soln] = discrete_control_torques[control_action_index[state_index]];
x_path[goal_index][length_of_soln][0] = RRT_tree[state_index][0];
x_path[goal_index][length_of_soln][1] = RRT_tree[state_index][1];
x_path[goal_index][length_of_soln][2] = RRT_tree[state_index][2];
x_path[goal_index][length_of_soln][3] = RRT_tree[state_index][3];
length_of_soln++;
state_index = parent_state_index[state_index];
}
}
}
}
}
weight = 1;
// Update adjacency matrix:
// for each goal state surrounding an initial state,
// if the goal state has been reached,
// if tree is growing near border of phase space, check if tree is growing within state space limits
// set respective flag in adjacency matrix to 1 (or to a weight)
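// note: k = 0..7 enumerates the eight neighbouring cells of this thread's grid cell
// (k = 0..2: the row at idx+GRID_X, k = 3..4: the left/right neighbours, k = 5..7: the row
// at idx-GRID_X); the idx % GRID_X checks guard against wrapping across row boundaries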
int k;
for (k = 0; k < 8; k++) {
if (not_found[k] == 0) {
if (k == 0 && idx % GRID_X != 0) {
if (idx + GRID_X - 1 <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X - 1] = weight;
}
} else if (k == 1) {
if (idx + GRID_X <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X] = weight;
}
} else if (k == 2 && (idx + 1) % GRID_X != 0) {
if (idx + GRID_X + 1 <= NUM_THREADS * NUM_BLOCKS - 1) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx + GRID_X + 1] = weight;
}
} else if (k == 3 && idx % GRID_X != 0) {
if (idx - 1 >= 0) { // don't need that line
adjacency_matrix[idx * NUM_THREADS * 8 + idx - 1] = weight;
}
} else if (k == 4 && (idx + 1) % GRID_X != 0) {
if (idx + 1 <= NUM_THREADS * NUM_BLOCKS - 1) { // don't need that line
adjacency_matrix[idx * NUM_THREADS * 8 + idx + 1] = weight;
}
} else if (k == 5 && idx % GRID_X != 0) {
if (idx - GRID_X - 1 >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X - 1] = weight;
}
} else if (k == 6) {
if (idx - GRID_X >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X] = weight;
}
} else if (k == 7 && (idx + 1) % GRID_X != 0) {
if (idx - GRID_X + 1 >= 0) {
adjacency_matrix[idx * NUM_THREADS * 8 + idx - GRID_X + 1] = weight;
}
}
}
}
//*/
//* copy path results of algorithm to device results array
int i, j;
int num_of_goals = NUM_OF_GOAL_STATES;
for (j = 0; j < num_of_goals; j++) {
for (i = 0; i < LENGTH_OF_SOLN_PATH; i++) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = x_path[j][i][0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = x_path[j][i][1];
control_solutions[idx * num_of_goals * LENGTH_OF_SOLN_PATH + j * LENGTH_OF_SOLN_PATH + i] = u_path[j][i];
if (not_found[j] == 0) {
if (i == LENGTH_OF_SOLN_PATH - 2) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = start_state[0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = start_state[1];
} else if (i == LENGTH_OF_SOLN_PATH - 1) {
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i] = end_state[j][0];
path_solutions[idx * 2 * num_of_goals * LENGTH_OF_SOLN_PATH + j * 2 * LENGTH_OF_SOLN_PATH + 2 * i + 1] = end_state[j][1];
}
}
}
}
//*/
/*
int i;
for (i = 0; i < NUM_RESULTS_PER_THREAD; i++)
result[idx * NUM_RESULTS_PER_THREAD + i] = start_state[i];
//*/
/*
result[idx * NUM_RESULTS_PER_THREAD + 0] = start_state[0];
result[idx * NUM_RESULTS_PER_THREAD + 1] = start_state[1];
//*/
}
////////////////////////////////////////////
// HELPER FUNCTIONS
////////////////////////////////////////////
/*
 * computes the Euclidean distances squared from point A to every point in array B
*/
__device__ void euclidianDistSquare(double* A, double B[][4], int lengthOfB, double* listOfDistSq)
{
int i;
for(i = 0; i < lengthOfB; i++)
listOfDistSq[i] = pow((B[i][0] - A[0]),2) + pow((B[i][1] - A[1]),2) + pow((B[i][2] - A[2]),2) + pow((B[i][3] - A[3]),2);
}
/*
* finds the index of the minimum in an array
*/
__device__ int findMin(double array[], int lengthOfArray) {
int minIndex = 0;
int i;
for (i = 0; i < lengthOfArray; i++) {
if (array[i] < array[minIndex])
minIndex = i;
}
return minIndex;
}
/*
* Computes x_dot of the acrobot, given x and a control input u
*/
__device__ void acrobotDynamics(double* x, double u, double* xd)
{
// acrobot parameters
int m1 = 1;
int m2 = 1;
double l1 = 1;
double l2 = 1;
double lc1 = l1/2;
double lc2 = l2/2;
double Ic1 = (lc1*lc1)/3;
double Ic2 = (lc2*lc2)/3;
double I1 = Ic1+m1*lc1*lc1;
double I2 = Ic2+m2*lc2*lc2;
double b1 = 0.4;
double b2 = 0.4;
double g = 9.8;
double H[2][2] = {{I1 + I2 + m2*l1*l1 + 2*m2*l1*lc2*cos(x[2]),
I2 + m2*l1*lc2*cos(x[2])},
{I2 + m2*l1*lc2*cos(x[2]),
I2}};
double C[2][2] = {{-2*m2*l1*lc2*sin(x[2])*x[3] + b1,
-m2*l1*lc2*sin(x[2])*x[3]},
{m2*l1*lc2*sin(x[2])*x[1],
b2}};
double G[2] = {m1*g*lc1*sin(x[0]) + m2*g*(l1*sin(x[0])+lc2*sin(x[0]+x[2])),
m2*g*lc2*sin(x[0]+x[2])};
double B[2] = {0,u};
double invH[2][2];
matrixInverse(H,invH);
double C_qd[2];
double qd[2] = {x[1],x[3]};
matrixMultiply(C,qd,C_qd);
double temp[2];
temp[0] = B[0] - C_qd[0] - G[0];
temp[1] = B[1] - C_qd[1] - G[1];
double qdd[2];
matrixMultiply(invH,temp,qdd);
xd[0] = x[1];
xd[1] = qdd[0];
xd[2] = x[3];
xd[3] = qdd[1];
}
/*
* Computes the matrix inverse of a 2x2 matrix
*/
__device__ void matrixInverse(double M[2][2], double invM[2][2])
{
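	// note: assumes M is invertible; there is no check for a zero determinant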
double detInv = 1/(M[0][0]*M[1][1] - M[0][1]*M[1][0]);
invM[0][0] = detInv*M[1][1];
invM[0][1] = detInv*M[0][1]*-1;
invM[1][0] = detInv*M[1][0]*-1;
invM[1][1] = detInv*M[0][0];
}
/*
 * Computes the product of a 2x2 matrix and a 2x1 vector
*/
__device__ void matrixMultiply(double A[2][2], double B[2], double C[2])
{
C[0] = A[0][0]*B[0] + A[0][1]*B[1];
C[1] = A[1][0]*B[0] + A[1][1]*B[1];
}
|
7cacc17cdf655ab0a500d673c23d45b2b9b7b39f.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include"io.h"
#include"cuda_mpi_routines.h"
/*! \fn int initialize_cuda_mpi(int myid, int nprocs);
* \brief CUDA initialization within MPI. */
int initialize_cuda_mpi(int myid, int nprocs)
{
int i_device = 0; //GPU device for this process
int n_device; //number of GPU devices available
hipError_t flag_error;
//get the number of cuda devices
flag_error = hipGetDeviceCount(&n_device);
#ifdef N_DEVICE_PER_NODE
  // Override the number of devices per node; works around GPU errors on Summit
n_device = N_DEVICE_PER_NODE;
chprintf( "N CUDA devices per Node: %d \n", n_device );
#endif
//check for errors
if(flag_error!=hipSuccess)
{
if(flag_error==hipErrorNoDevice)
fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorNoDevice\n",myid,n_device);
if(flag_error==hipErrorInsufficientDriver)
fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorInsufficientDriver\n",myid,n_device);
fflush(stderr);
return 1;
}
//set a cuda device for each process
hipSetDevice(myid%n_device);
//double check
hipGetDevice(&i_device);
printf("In initialize_cuda_mpi: myid = %d, i_device = %d, n_device = %d\n",myid,i_device,n_device);
fflush(stdout);
return 0;
}
| 7cacc17cdf655ab0a500d673c23d45b2b9b7b39f.cu | #include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include"io.h"
#include"cuda_mpi_routines.h"
/*! \fn int initialize_cuda_mpi(int myid, int nprocs);
* \brief CUDA initialization within MPI. */
int initialize_cuda_mpi(int myid, int nprocs)
{
int i_device = 0; //GPU device for this process
int n_device; //number of GPU devices available
cudaError_t flag_error;
//get the number of cuda devices
flag_error = cudaGetDeviceCount(&n_device);
#ifdef N_DEVICE_PER_NODE
  // Override the number of devices per node; works around GPU errors on Summit
n_device = N_DEVICE_PER_NODE;
chprintf( "N CUDA devices per Node: %d \n", n_device );
#endif
//check for errors
if(flag_error!=cudaSuccess)
{
if(flag_error==cudaErrorNoDevice)
fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorNoDevice\n",myid,n_device);
if(flag_error==cudaErrorInsufficientDriver)
fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorInsufficientDriver\n",myid,n_device);
fflush(stderr);
return 1;
}
//set a cuda device for each process
cudaSetDevice(myid%n_device);
//double check
cudaGetDevice(&i_device);
printf("In initialize_cuda_mpi: myid = %d, i_device = %d, n_device = %d\n",myid,i_device,n_device);
fflush(stdout);
return 0;
}
|
4df60bed3fd3cbcabf3d9f64c23f26cad5645fba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel(void) {
while(1);
} | 4df60bed3fd3cbcabf3d9f64c23f26cad5645fba.cu | #include "includes.h"
__global__ void kernel(void) {
while(1);
} |
fcb441da261eb92bff2d799dc0acb905a557bd80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <qudaQKXTM.h>
#include <errno.h>
#include <mpi.h>
#include <limits>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <cuPrintf.cu>
#define THREADS_PER_BLOCK 64
#define PI 3.141592653589793
//#define TIMING_REPORT
using namespace quda;
extern Topology *default_topo;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 2: Constant References $$
/* block for device constants */
__constant__ bool c_dimBreak[4];
__constant__ int c_nColor;
__constant__ int c_nDim;
__constant__ int c_localL[4];
__constant__ int c_plusGhost[4];
__constant__ int c_minusGhost[4];
__constant__ int c_stride;
__constant__ int c_surface[4];
__constant__ int c_nSpin;
__constant__ double c_alphaAPE;
__constant__ double c_alphaGauss;
__constant__ int c_threads;
__constant__ int c_eps[6][3];
__constant__ int c_sgn_eps[6];
__constant__ int c_procPosition[4];
__constant__ int c_totalL[4];
__constant__ int c_Nmoms;
__constant__ short int c_moms[MAX_NMOMENTA][3];
__constant__ short int c_mesons_indices[10][16][4];
__constant__ short int c_NTN_indices[16][4];
__constant__ short int c_NTR_indices[64][6];
__constant__ short int c_RTN_indices[64][6];
__constant__ short int c_RTR_indices[256][8];
__constant__ short int c_Delta_indices[3][16][4];
__constant__ float c_mesons_values[10][16];
__constant__ float c_NTN_values[16];
__constant__ float c_NTR_values[64];
__constant__ float c_RTN_values[64];
__constant__ float c_RTR_values[256];
__constant__ float c_Delta_values[3][16];
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////
/* Block for global variables */
float GK_deviceMemory = 0.;
int GK_nColor;
int GK_nSpin;
int GK_nDim;
int GK_strideFull;
double GK_alphaAPE;
double GK_alphaGauss;
int GK_localVolume;
int GK_totalVolume;
int GK_nsmearAPE;
int GK_nsmearGauss;
bool GK_dimBreak[QUDAQKXTM_DIM];
int GK_localL[QUDAQKXTM_DIM];
int GK_totalL[QUDAQKXTM_DIM];
int GK_nProc[QUDAQKXTM_DIM];
int GK_plusGhost[QUDAQKXTM_DIM];
int GK_minusGhost[QUDAQKXTM_DIM];
int GK_surface3D[QUDAQKXTM_DIM];
bool GK_init_qudaQKXTM_flag = false;
int GK_Nsources;
int GK_sourcePosition[MAX_NSOURCES][QUDAQKXTM_DIM];
int GK_Nmoms;
short int GK_moms[MAX_NMOMENTA][3];
short int GK_mesons_indices[10][16][4] = {0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1,0,3,0,3,0,3,1,2,0,3,2,1,0,3,3,0,1,2,0,3,1,2,1,2,1,2,2,1,1,2,3,0,2,1,0,3,2,1,1,2,2,1,2,1,2,1,3,0,3,0,0,3,3,0,1,2,3,0,2,1,3,0,3,0,0,3,0,3,0,3,1,2,0,3,2,1,0,3,3,0,1,2,0,3,1,2,1,2,1,2,2,1,1,2,3,0,2,1,0,3,2,1,1,2,2,1,2,1,2,1,3,0,3,0,0,3,3,0,1,2,3,0,2,1,3,0,3,0,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1};
float GK_mesons_values[10][16] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1};
short int GK_NTN_indices[16][4] = {0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2};
float GK_NTN_values[16] = {-1,1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1};
short int GK_NTR_indices[64][6] = {0,1,0,3,0,2,0,1,0,3,1,3,0,1,0,3,2,0,0,1,0,3,3,1,0,1,1,2,0,2,0,1,1,2,1,3,0,1,1,2,2,0,0,1,1,2,3,1,0,1,2,1,0,2,0,1,2,1,1,3,0,1,2,1,2,0,0,1,2,1,3,1,0,1,3,0,0,2,0,1,3,0,1,3,0,1,3,0,2,0,0,1,3,0,3,1,1,0,0,3,0,2,1,0,0,3,1,3,1,0,0,3,2,0,1,0,0,3,3,1,1,0,1,2,0,2,1,0,1,2,1,3,1,0,1,2,2,0,1,0,1,2,3,1,1,0,2,1,0,2,1,0,2,1,1,3,1,0,2,1,2,0,1,0,2,1,3,1,1,0,3,0,0,2,1,0,3,0,1,3,1,0,3,0,2,0,1,0,3,0,3,1,2,3,0,3,0,2,2,3,0,3,1,3,2,3,0,3,2,0,2,3,0,3,3,1,2,3,1,2,0,2,2,3,1,2,1,3,2,3,1,2,2,0,2,3,1,2,3,1,2,3,2,1,0,2,2,3,2,1,1,3,2,3,2,1,2,0,2,3,2,1,3,1,2,3,3,0,0,2,2,3,3,0,1,3,2,3,3,0,2,0,2,3,3,0,3,1,3,2,0,3,0,2,3,2,0,3,1,3,3,2,0,3,2,0,3,2,0,3,3,1,3,2,1,2,0,2,3,2,1,2,1,3,3,2,1,2,2,0,3,2,1,2,3,1,3,2,2,1,0,2,3,2,2,1,1,3,3,2,2,1,2,0,3,2,2,1,3,1,3,2,3,0,0,2,3,2,3,0,1,3,3,2,3,0,2,0,3,2,3,0,3,1};
float GK_NTR_values[64] = {1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1};
short int GK_RTN_indices[64][6] = {0,3,0,1,0,2,0,3,0,1,1,3,0,3,0,1,2,0,0,3,0,1,3,1,0,3,1,0,0,2,0,3,1,0,1,3,0,3,1,0,2,0,0,3,1,0,3,1,0,3,2,3,0,2,0,3,2,3,1,3,0,3,2,3,2,0,0,3,2,3,3,1,0,3,3,2,0,2,0,3,3,2,1,3,0,3,3,2,2,0,0,3,3,2,3,1,1,2,0,1,0,2,1,2,0,1,1,3,1,2,0,1,2,0,1,2,0,1,3,1,1,2,1,0,0,2,1,2,1,0,1,3,1,2,1,0,2,0,1,2,1,0,3,1,1,2,2,3,0,2,1,2,2,3,1,3,1,2,2,3,2,0,1,2,2,3,3,1,1,2,3,2,0,2,1,2,3,2,1,3,1,2,3,2,2,0,1,2,3,2,3,1,2,1,0,1,0,2,2,1,0,1,1,3,2,1,0,1,2,0,2,1,0,1,3,1,2,1,1,0,0,2,2,1,1,0,1,3,2,1,1,0,2,0,2,1,1,0,3,1,2,1,2,3,0,2,2,1,2,3,1,3,2,1,2,3,2,0,2,1,2,3,3,1,2,1,3,2,0,2,2,1,3,2,1,3,2,1,3,2,2,0,2,1,3,2,3,1,3,0,0,1,0,2,3,0,0,1,1,3,3,0,0,1,2,0,3,0,0,1,3,1,3,0,1,0,0,2,3,0,1,0,1,3,3,0,1,0,2,0,3,0,1,0,3,1,3,0,2,3,0,2,3,0,2,3,1,3,3,0,2,3,2,0,3,0,2,3,3,1,3,0,3,2,0,2,3,0,3,2,1,3,3,0,3,2,2,0,3,0,3,2,3,1};
float GK_RTN_values[64] = {-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1};
short int GK_RTR_indices[256][8] = {0,3,0,3,0,2,0,2,0,3,0,3,0,2,1,3,0,3,0,3,0,2,2,0,0,3,0,3,0,2,3,1,0,3,0,3,1,3,0,2,0,3,0,3,1,3,1,3,0,3,0,3,1,3,2,0,0,3,0,3,1,3,3,1,0,3,0,3,2,0,0,2,0,3,0,3,2,0,1,3,0,3,0,3,2,0,2,0,0,3,0,3,2,0,3,1,0,3,0,3,3,1,0,2,0,3,0,3,3,1,1,3,0,3,0,3,3,1,2,0,0,3,0,3,3,1,3,1,0,3,1,2,0,2,0,2,0,3,1,2,0,2,1,3,0,3,1,2,0,2,2,0,0,3,1,2,0,2,3,1,0,3,1,2,1,3,0,2,0,3,1,2,1,3,1,3,0,3,1,2,1,3,2,0,0,3,1,2,1,3,3,1,0,3,1,2,2,0,0,2,0,3,1,2,2,0,1,3,0,3,1,2,2,0,2,0,0,3,1,2,2,0,3,1,0,3,1,2,3,1,0,2,0,3,1,2,3,1,1,3,0,3,1,2,3,1,2,0,0,3,1,2,3,1,3,1,0,3,2,1,0,2,0,2,0,3,2,1,0,2,1,3,0,3,2,1,0,2,2,0,0,3,2,1,0,2,3,1,0,3,2,1,1,3,0,2,0,3,2,1,1,3,1,3,0,3,2,1,1,3,2,0,0,3,2,1,1,3,3,1,0,3,2,1,2,0,0,2,0,3,2,1,2,0,1,3,0,3,2,1,2,0,2,0,0,3,2,1,2,0,3,1,0,3,2,1,3,1,0,2,0,3,2,1,3,1,1,3,0,3,2,1,3,1,2,0,0,3,2,1,3,1,3,1,0,3,3,0,0,2,0,2,0,3,3,0,0,2,1,3,0,3,3,0,0,2,2,0,0,3,3,0,0,2,3,1,0,3,3,0,1,3,0,2,0,3,3,0,1,3,1,3,0,3,3,0,1,3,2,0,0,3,3,0,1,3,3,1,0,3,3,0,2,0,0,2,0,3,3,0,2,0,1,3,0,3,3,0,2,0,2,0,0,3,3,0,2,0,3,1,0,3,3,0,3,1,0,2,0,3,3,0,3,1,1,3,0,3,3,0,3,1,2,0,0,3,3,0,3,1,3,1,1,2,0,3,0,2,0,2,1,2,0,3,0,2,1,3,1,2,0,3,0,2,2,0,1,2,0,3,0,2,3,1,1,2,0,3,1,3,0,2,1,2,0,3,1,3,1,3,1,2,0,3,1,3,2,0,1,2,0,3,1,3,3,1,1,2,0,3,2,0,0,2,1,2,0,3,2,0,1,3,1,2,0,3,2,0,2,0,1,2,0,3,2,0,3,1,1,2,0,3,3,1,0,2,1,2,0,3,3,1,1,3,1,2,0,3,3,1,2,0,1,2,0,3,3,1,3,1,1,2,1,2,0,2,0,2,1,2,1,2,0,2,1,3,1,2,1,2,0,2,2,0,1,2,1,2,0,2,3,1,1,2,1,2,1,3,0,2,1,2,1,2,1,3,1,3,1,2,1,2,1,3,2,0,1,2,1,2,1,3,3,1,1,2,1,2,2,0,0,2,1,2,1,2,2,0,1,3,1,2,1,2,2,0,2,0,1,2,1,2,2,0,3,1,1,2,1,2,3,1,0,2,1,2,1,2,3,1,1,3,1,2,1,2,3,1,2,0,1,2,1,2,3,1,3,1,1,2,2,1,0,2,0,2,1,2,2,1,0,2,1,3,1,2,2,1,0,2,2,0,1,2,2,1,0,2,3,1,1,2,2,1,1,3,0,2,1,2,2,1,1,3,1,3,1,2,2,1,1,3,2,0,1,2,2,1,1,3,3,1,1,2,2,1,2,0,0,2,1,2,2,1,2,0,1,3,1,2,2,1,2,0,2,0,1,2,2,1,2,0,3,1,1,2,2,1,3,1,0,2,1,2,2,1,3,1,1,3,1,2,2,1,3,1,2,0,1,2,2,1,3,1,3,1,1,2,3,0,0,2,0,2,1,2,3,0,0,2,1,3,1,2,3,0,0,2,2,0,1,2,3,0,0,2,3,1,1,2,3,0,1,3,0,2,1,2,3,0,1,3,1,3,1,2,3,0,1,3,2,0,1,2,3,0,1,3,3,1,1,2,3,0,2,0,0,2,1,2,3,0,2,0,1,3,1,2,3,0,2,0,2,0,1,2,3,0,2,0,3,1,1,2,3,0,3,1,0,2,1,2,3,0,3,1,1,3,1,2,3,0,3,1,2,0,1,2,3,0,3,1,3,1,2,1,0,3,0,2,0,2,2,1,0,3,0,2,1,3,2,1,0,3,0,2,2,0,2,1,0,3,0,2,3,1,2,1,0,3,1,3,0,2,2,1,0,3,1,3,1,3,2,1,0,3,1,3,2,0,2,1,0,3,1,3,3,1,2,1,0,3,2,0,0,2,2,1,0,3,2,0,1,3,2,1,0,3,2,0,2,0,2,1,0,3,2,0,3,1,2,1,0,3,3,1,0,2,2,1,0,3,3,1,1,3,2,1,0,3,3,1,2,0,2,1,0,3,3,1,3,1,2,1,1,2,0,2,0,2,2,1,1,2,0,2,1,3,2,1,1,2,0,2,2,0,2,1,1,2,0,2,3,1,2,1,1,2,1,3,0,2,2,1,1,2,1,3,1,3,2,1,1,2,1,3,2,0,2,1,1,2,1,3,3,1,2,1,1,2,2,0,0,2,2,1,1,2,2,0,1,3,2,1,1,2,2,0,2,0,2,1,1,2,2,0,3,1,2,1,1,2,3,1,0,2,2,1,1,2,3,1,1,3,2,1,1,2,3,1,2,0,2,1,1,2,3,1,3,1,2,1,2,1,0,2,0,2,2,1,2,1,0,2,1,3,2,1,2,1,0,2,2,0,2,1,2,1,0,2,3,1,2,1,2,1,1,3,0,2,2,1,2,1,1,3,1,3,2,1,2,1,1,3,2,0,2,1,2,1,1,3,3,1,2,1,2,1,2,0,0,2,2,1,2,1,2,0,1,3,2,1,2,1,2,0,2,0,2,1,2,1,2,0,3,1,2,1,2,1,3,1,0,2,2,1,2,1,3,1,1,3,2,1,2,1,3,1,2,0,2,1,2,1,3,1,3,1,2,1,3,0,0,2,0,2,2,1,3,0,0,2,1,3,2,1,3,0,0,2,2,0,2,1,3,0,0,2,3,1,2,1,3,0,1,3,0,2,2,1,3,0,1,3,1,3,2,1,3,0,1,3,2,0,2,1,3,0,1,3,3,1,2,1,3,0,2,0,0,2,2,1,3,0,2,0,1,3,2,1,3,0,2,0,2,0,2,1,3,0,2,0,3,1,2,1,3,0,3,1,0,2,2,1,3,0,3,1,1,3,2,1,3,0,3,1,2,0,2,1,3,0,3,1,3,1,3,0,0,3,0,2,0,2,3,0,0,3,0,2,1,3,3,0,0,3,0,2,2,0,3,0,0,3,0,2,3,1,3,0,0,3,1,3,0,2,3,0,0,3,1,3,1,3,3,0,0,3,1,3,2,0,3,0,0,3,1,3,3,1,3,0,0,3,2,0,0,2,3,0,0,3,2,0,1,3,3,0,0,3,2,0,2,0,3,0,0,3,2,0,3,1,3,0,0,3,3,1,0,2,3,0,0,3,3,1,1,3,3,0,0,3,3,1,2,0,3,0,0,3,3,1,3,1,3,0,1,2,0,2,0,2,3,0,1,2,0,2,1,3,3,0,1,2,0,2,2,0,3,0,1,2,0,2,3,1,3,0,1,2,1,3,0,2,3,0,1,2,1,3,1,3,3,0,1,2,1,3,2,0,3,0,1,2,1,3,3,1,3,0,1,2,2,0,0,2,3,0,1,2,2,0,1,3,3,0,1,2,2,0,2,0,3,0,1,2,2,0,3,1
,3,0,1,2,3,1,0,2,3,0,1,2,3,1,1,3,3,0,1,2,3,1,2,0,3,0,1,2,3,1,3,1,3,0,2,1,0,2,0,2,3,0,2,1,0,2,1,3,3,0,2,1,0,2,2,0,3,0,2,1,0,2,3,1,3,0,2,1,1,3,0,2,3,0,2,1,1,3,1,3,3,0,2,1,1,3,2,0,3,0,2,1,1,3,3,1,3,0,2,1,2,0,0,2,3,0,2,1,2,0,1,3,3,0,2,1,2,0,2,0,3,0,2,1,2,0,3,1,3,0,2,1,3,1,0,2,3,0,2,1,3,1,1,3,3,0,2,1,3,1,2,0,3,0,2,1,3,1,3,1,3,0,3,0,0,2,0,2,3,0,3,0,0,2,1,3,3,0,3,0,0,2,2,0,3,0,3,0,0,2,3,1,3,0,3,0,1,3,0,2,3,0,3,0,1,3,1,3,3,0,3,0,1,3,2,0,3,0,3,0,1,3,3,1,3,0,3,0,2,0,0,2,3,0,3,0,2,0,1,3,3,0,3,0,2,0,2,0,3,0,3,0,2,0,3,1,3,0,3,0,3,1,0,2,3,0,3,0,3,1,1,3,3,0,3,0,3,1,2,0,3,0,3,0,3,1,3,1};
float GK_RTR_values[256] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
short int GK_Delta_indices[3][16][4] = {0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2};
float GK_Delta_values[3][16] = {1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1};
// global variables used for MPI
MPI_Group GK_fullGroup , GK_spaceGroup , GK_timeGroup;
MPI_Comm GK_spaceComm , GK_timeComm;
int GK_localRank;
int GK_localSize;
int GK_timeRank;
int GK_timeSize;
//////////////////////////////////////////////////
static void createMomenta(int Q_sq){
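  // enumerate all integer momenta (nx,ny,nz) with nx*nx+ny*ny+nz*nz <= Q_sq,
  // grouped by increasing |n|^2, and store them in GK_moms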
int counter=0;
for(int iQ = 0 ; iQ <= Q_sq ; iQ++){
for(int nx = iQ ; nx >= -iQ ; nx--){
for(int ny = iQ ; ny >= -iQ ; ny--){
for(int nz = iQ ; nz >= -iQ ; nz--){
if( nx*nx + ny*ny + nz*nz == iQ ){
GK_moms[counter][0] = nx;
GK_moms[counter][1] = ny;
GK_moms[counter][2] = nz;
counter++;
}
}
}
}
}
  if(counter > MAX_NMOMENTA) errorQuda("Error: exceeded the maximum number of momenta\n");
GK_Nmoms=counter;
}
void quda::init_qudaQKXTM(qudaQKXTMinfo *info){
if(GK_init_qudaQKXTM_flag == false){
GK_nColor = 3;
GK_nSpin = 4;
GK_nDim = QUDAQKXTM_DIM;
GK_alphaAPE = info->alphaAPE;
GK_alphaGauss = info->alphaGauss;
GK_nsmearAPE = info->nsmearAPE;
GK_nsmearGauss = info->nsmearGauss;
createMomenta(info->Q_sq);
    // from here on, values depend on the lattice and the partitioning we choose
for(int i = 0 ; i < GK_nDim ; i++)
GK_nProc[i] = comm_dim(i);
for(int i = 0 ; i < GK_nDim ; i++){ // take local and total lattice
GK_localL[i] = info->lL[i];
GK_totalL[i] = GK_nProc[i] * GK_localL[i];
}
GK_localVolume = 1;
GK_totalVolume = 1;
for(int i = 0 ; i < GK_nDim ; i++){
GK_localVolume *= GK_localL[i];
GK_totalVolume *= GK_totalL[i];
}
GK_strideFull = GK_localVolume;
for (int i=0; i<GK_nDim; i++) {
GK_surface3D[i] = 1;
for (int j=0; j<GK_nDim; j++) {
if (i==j) continue;
GK_surface3D[i] *= GK_localL[j];
}
}
for(int i = 0 ; i < GK_nDim ; i++)
if( GK_localL[i] == GK_totalL[i] )
GK_surface3D[i] = 0;
for(int i = 0 ; i < GK_nDim ; i++){
GK_plusGhost[i] =0;
GK_minusGhost[i] = 0;
}
#ifdef MULTI_GPU
int lastIndex = GK_localVolume;
for(int i = 0 ; i < GK_nDim ; i++)
if( GK_localL[i] < GK_totalL[i] ){
GK_plusGhost[i] = lastIndex ;
GK_minusGhost[i] = lastIndex + GK_surface3D[i];
lastIndex += 2*GK_surface3D[i];
}
#endif
for(int i = 0 ; i < GK_nDim ; i++){
if( GK_localL[i] < GK_totalL[i])
GK_dimBreak[i] = true;
else
GK_dimBreak[i] = false;
}
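    // the six permutations of (0,1,2) and their signs (Levi-Civita epsilon),
    // uploaded below as the device constants c_eps and c_sgn_eps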
const int eps[6][3]=
{
{0,1,2},
{2,0,1},
{1,2,0},
{2,1,0},
{0,2,1},
{1,0,2}
};
const int sgn_eps[6]=
{
+1,+1,+1,-1,-1,-1
};
int procPosition[4];
for(int i= 0 ; i < 4 ; i++)
procPosition[i] = comm_coords(default_topo)[i];
    // number of source positions (checked against MAX_NSOURCES below)
GK_Nsources = info->Nsources;
    if(GK_Nsources > MAX_NSOURCES) errorQuda("Error: you exceeded the maximum number of source positions\n");
for(int is = 0 ; is < GK_Nsources ; is++)
for(int i = 0 ; i < 4 ; i++)
GK_sourcePosition[is][i] = info->sourcePosition[is][i];
    // initialization also defines the device constants
hipMemcpyToSymbol(c_nColor, &GK_nColor, sizeof(int) );
hipMemcpyToSymbol(c_nSpin, &GK_nSpin, sizeof(int) );
hipMemcpyToSymbol(c_nDim, &GK_nDim, sizeof(int) );
hipMemcpyToSymbol(c_stride, &GK_strideFull, sizeof(int) );
hipMemcpyToSymbol(c_alphaAPE, &GK_alphaAPE , sizeof(double) );
hipMemcpyToSymbol(c_alphaGauss, &GK_alphaGauss , sizeof(double) );
    hipMemcpyToSymbol(c_threads , &GK_localVolume , sizeof(int) );  // may change
hipMemcpyToSymbol(c_dimBreak , GK_dimBreak , QUDAQKXTM_DIM*sizeof(bool) );
hipMemcpyToSymbol(c_localL , GK_localL , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_totalL , GK_totalL , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_plusGhost , GK_plusGhost , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_minusGhost , GK_minusGhost , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_surface , GK_surface3D , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_eps, &(eps[0][0]) , 6*3*sizeof(int) );
hipMemcpyToSymbol(c_sgn_eps, sgn_eps , 6*sizeof(int) );
hipMemcpyToSymbol(c_procPosition, procPosition, QUDAQKXTM_DIM*sizeof(int));
hipMemcpyToSymbol(c_Nmoms, &GK_Nmoms, sizeof(int));
hipMemcpyToSymbol(c_moms, GK_moms, MAX_NMOMENTA*3*sizeof(short int));
hipMemcpyToSymbol(c_mesons_indices,GK_mesons_indices,10*16*4*sizeof(short int));
hipMemcpyToSymbol(c_NTN_indices,GK_NTN_indices,16*4*sizeof(short int));
hipMemcpyToSymbol(c_NTR_indices,GK_NTR_indices,64*6*sizeof(short int));
hipMemcpyToSymbol(c_RTN_indices,GK_RTN_indices,64*6*sizeof(short int));
hipMemcpyToSymbol(c_RTR_indices,GK_RTR_indices,256*8*sizeof(short int));
hipMemcpyToSymbol(c_Delta_indices,GK_Delta_indices,3*16*4*sizeof(short int));
hipMemcpyToSymbol(c_mesons_values,GK_mesons_values,10*16*sizeof(float));
hipMemcpyToSymbol(c_NTN_values,GK_NTN_values,16*sizeof(float));
hipMemcpyToSymbol(c_NTR_values,GK_NTR_values,64*sizeof(float));
hipMemcpyToSymbol(c_RTN_values,GK_RTN_values,64*sizeof(float));
hipMemcpyToSymbol(c_RTR_values,GK_RTR_values,256*sizeof(float));
hipMemcpyToSymbol(c_Delta_values,GK_Delta_values,3*16*sizeof(float));
checkCudaError();
    // create process groups so that MPI reductions can run over spatial points only
MPI_Comm_group(MPI_COMM_WORLD, &GK_fullGroup);
int space3D_proc;
space3D_proc = GK_nProc[0] * GK_nProc[1] * GK_nProc[2];
int *ranks = (int*) malloc(space3D_proc*sizeof(int));
for(int i= 0 ; i < space3D_proc ; i++)
ranks[i] = comm_coords(default_topo)[3] + GK_nProc[3]*i;
// for(int i= 0 ; i < space3D_proc ; i++)
// printf("%d (%d,%d,%d,%d)\n",comm_rank(),comm_coords(default_topo)[0],comm_coords(default_topo)[1],comm_coords(default_topo)[2],comm_coords(default_topo)[3]);
// for(int i= 0 ; i < space3D_proc ; i++)
//printf("%d %d\n",comm_rank(),ranks[i]);
MPI_Group_incl(GK_fullGroup,space3D_proc,ranks,&GK_spaceGroup);
MPI_Group_rank(GK_spaceGroup,&GK_localRank);
MPI_Group_size(GK_spaceGroup,&GK_localSize);
MPI_Comm_create(MPI_COMM_WORLD, GK_spaceGroup , &GK_spaceComm);
//if(GK_spaceComm == MPI_COMM_NULL) printf("NULL %d\n",comm_rank());
//exit(-1);
    // create a process group along the time direction to use MPI gather
int *ranksTime = (int*) malloc(GK_nProc[3]*sizeof(int));
for(int i=0 ; i < GK_nProc[3] ; i++)
ranksTime[i] = i;
MPI_Group_incl(GK_fullGroup,GK_nProc[3], ranksTime, &GK_timeGroup);
MPI_Group_rank(GK_timeGroup, &GK_timeRank);
MPI_Group_size(GK_timeGroup, &GK_timeSize);
MPI_Comm_create(MPI_COMM_WORLD, GK_timeGroup, &GK_timeComm);
//////////////////////////////////////////////////////////////////////////////
free(ranks);
free(ranksTime);
GK_init_qudaQKXTM_flag = true;
printfQuda("qudaQKXTM has been initialized\n");
}
else{
printfQuda("???\n");
return;
}
}
void quda::printf_qudaQKXTM(){
if(GK_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
printfQuda("Number of colors is %d\n",GK_nColor);
printfQuda("Number of spins is %d\n",GK_nSpin);
printfQuda("Number of dimensions is %d\n",GK_nDim);
printfQuda("Number of process in each direction is (x,y,z,t) %d x %d x %d x %d\n",GK_nProc[0],GK_nProc[1],GK_nProc[2],GK_nProc[3]);
printfQuda("Total lattice is (x,y,z,t) %d x %d x %d x %d\n",GK_totalL[0],GK_totalL[1],GK_totalL[2],GK_totalL[3]);
printfQuda("Local lattice is (x,y,z,t) %d x %d x %d x %d\n",GK_localL[0],GK_localL[1],GK_localL[2],GK_localL[3]);
printfQuda("Total volume is %d\n",GK_totalVolume);
printfQuda("Local volume is %d\n",GK_localVolume);
printfQuda("Surface is (x,y,z,t) ( %d , %d , %d , %d)\n",GK_surface3D[0],GK_surface3D[1],GK_surface3D[2],GK_surface3D[3]);
printfQuda("The plus Ghost points in directions (x,y,z,t) ( %d , %d , %d , %d )\n",GK_plusGhost[0],GK_plusGhost[1],GK_plusGhost[2],GK_plusGhost[3]);
printfQuda("The Minus Ghost points in directixons (x,y,z,t) ( %d , %d , %d , %d )\n",GK_minusGhost[0],GK_minusGhost[1],GK_minusGhost[2],GK_minusGhost[3]);
printfQuda("For APE smearing we use nsmear = %d , alpha = %lf\n",GK_nsmearAPE,GK_alphaAPE);
printfQuda("For Gauss smearing we use nsmear = %d , alpha = %lf\n",GK_nsmearGauss,GK_alphaGauss);
printfQuda("I got %d source positions to work on\n",GK_Nsources);
printfQuda("I got %d number of momenta to work on\n",GK_Nmoms);
}
static __inline__ __device__ double2 fetch_double2(hipTextureObject_t t, int i)
{
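  // doubles cannot be fetched from textures directly: read the data as an int4
  // and reassemble the two halves into a double2 with __hiloint2double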
int4 v =tex1Dfetch<int4>(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
static __inline__ __device__ float2 fetch_float2(hipTextureObject_t t, int i)
{
float2 v = tex1Dfetch<float2>(t,i);
return v;
}
template<typename Float2>
__device__ inline Float2 operator*(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x*b.x - a.y*b.y;
res.y = a.x*b.y + a.y*b.x;
return res;
}
/*
template<typename Float2, typename Float>
__device__ inline Float2 operator*(const Float a , const Float2 b){
Float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
*/
__device__ inline float2 operator*(const float a , const float2 b){
float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const double a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator*(const int a , const Float2 b){
Float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator+(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x + b.x;
res.y = a.y + b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator-(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x - b.x;
res.y = a.y - b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 conj(const Float2 a){
Float2 res;
res.x = a.x;
res.y = -a.y;
return res;
}
__device__ inline float norm(const float2 a){
float res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
__device__ inline double norm(const double2 a){
double res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
template<typename Float2>
__device__ inline Float2 get_Projector(Float2 projector[4][4],
WHICHPARTICLE PARTICLE,
WHICHPROJECTOR PID){
  // important: projectors must be in the twisted basis
#include <projectors_tm_base.h>
}
template<typename Float2>
__device__ inline Float2 get_Operator(Float2 gamma[4][4], int flag,
WHICHPARTICLE TESTPARTICLE,
int partFlag){
#include <gammas_tm_base.h>
}
#include <core_def.h>
__global__ void calculatePlaq_kernel_double(hipTextureObject_t gaugeTexPlaq,
double *partial_plaq){
#define FLOAT2 double2
#define FLOAT double
#define READGAUGE_FLOAT READGAUGE_double
#include <plaquette_core.h>
#undef FLOAT2
#undef FLOAT
#undef READGAUGE_FLOAT
}
__global__ void calculatePlaq_kernel_float(hipTextureObject_t gaugeTexPlaq,
float *partial_plaq){
#define FLOAT2 float2
#define FLOAT float
#define READGAUGE_FLOAT READGAUGE_float
#include <plaquette_core.h>
#undef READGAUGE_FLOAT
#undef FLOAT2
#undef FLOAT
}
__global__ void gaussianSmearing_kernel_float(float2* out,
hipTextureObject_t vecInTex,
hipTextureObject_t gaugeTex ){
#define FLOAT2 float2
#define READGAUGE_FLOAT READGAUGE_float
#define READVECTOR_FLOAT READVECTOR_float
#include <Gauss_core.h>
#undef READGAUGE_FLOAT
#undef READVECTOR_FLOAT
#undef FLOAT2
}
__global__ void gaussianSmearing_kernel_double(double2* out,
hipTextureObject_t vecInTex,
hipTextureObject_t gaugeTex ){
#define FLOAT2 double2
#define READGAUGE_FLOAT READGAUGE_double
#define READVECTOR_FLOAT READVECTOR_double
#include <Gauss_core.h>
#undef READGAUGE_FLOAT
#undef READVECTOR_FLOAT
#undef FLOAT2
}
__global__ void contractMesons_kernel_float(float2* block,
hipTextureObject_t prop1Tex,
hipTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractMesons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractMesons_kernel_PosSpace_float(float2* block,
hipTextureObject_t prop1Tex,
hipTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractMesons_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractMesons_kernel_double(double2* block,
hipTextureObject_t prop1Tex,
hipTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <contractMesons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractBaryons_kernel_float(float2* block,
hipTextureObject_t prop1Tex,
hipTextureObject_t prop2Tex,
int it, int x0, int y0, int z0, int ip){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractBaryons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractBaryons_kernel_PosSpace_float(float2* block,
hipTextureObject_t prop1Tex,
hipTextureObject_t prop2Tex,
int it, int x0, int y0, int z0, int ip){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractBaryons_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
/*
__global__ void contractBaryons_kernel_double(double2* block, hipTextureObject_t prop1Tex, hipTextureObject_t prop2Tex,int it, int x0, int y0, int z0, int ip){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <contractBaryons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
*/
__global__ void seqSourceFixSinkPart1_kernel_float(float2* out, int timeslice,
hipTextureObject_t tex1,
hipTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <seqSourceFixSinkPart1_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart2_kernel_float(float2* out,
int timeslice,
hipTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <seqSourceFixSinkPart2_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart1_kernel_double(double2* out,
int timeslice,
hipTextureObject_t tex1,
hipTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <seqSourceFixSinkPart1_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart2_kernel_double(double2* out,
int timeslice,
hipTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <seqSourceFixSinkPart2_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//- Fix Sink kernels, ultra-local
__global__ void fixSinkContractions_local_kernel_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_local_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_PosSpace_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_local_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_local_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_PosSpace_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_local_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
//- Fix Sink kernels, noether
__global__ void fixSinkContractions_noether_kernel_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_noether_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_PosSpace_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_noether_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_noether_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_PosSpace_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_noether_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
//- Fix Sink kernels, one-derivative
__global__ void fixSinkContractions_oneD_kernel_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_oneD_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_PosSpace_float(float2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_oneD_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_oneD_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_PosSpace_double(double2* block,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_oneD_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
template<typename Float, typename Float2>
__global__ void scaleVector_kernel(Float a, Float2* inOut){
#include <scaleVector_core.h>
}
template<typename Float2>
__global__ void uploadToCuda_kernel(Float2 *in, double2 *outEven, double2 *outOdd){
#include <uploadToCuda_core.h>
}
template<typename Float2>
__global__ void downloadFromCuda_kernel(Float2 *out, double2 *inEven, double2 *inOdd){
#include <downloadFromCuda_core.h>
}
template<typename Float2>
__global__ void rotateToPhysicalBase_kernel(Float2 *inOut, int sign){
#include <rotateToPhysicalBase_core.h>
}
__global__ void castDoubleToFloat_kernel(float2 *out, double2 *in){
#include <castDoubleToFloat_core.h>
}
__global__ void castFloatToDouble_kernel(double2 *out, float2 *in){
#include <castFloatToDouble_core.h>
}
template<typename Float2>
__global__ void conjugate_vector_kernel(Float2 *inOut){
#include <conjugate_vector_core.h>
}
template<typename Float2>
__global__ void apply_gamma5_vector_kernel(Float2 *inOut){
#include <apply_gamma5_vector_core.h>
}
template<typename Float2>
__global__ void conjugate_propagator_kernel(Float2 *inOut){
#include <conjugate_propagator_core.h>
}
template<typename Float2>
__global__ void apply_gamma5_propagator_kernel(Float2 *inOut){
#include <apply_gamma5_propagator_core.h>
}
template<typename Float>
static Float calculatePlaq_kernel(hipTextureObject_t gaugeTexPlaq){
Float plaquette = 0.;
Float globalPlaquette = 0.;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
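  // each block accumulates one partial plaquette sum; the host sums the per-block
  // results and then reduces across MPI ranks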
Float *h_partial_plaq = NULL;
Float *d_partial_plaq = NULL;
h_partial_plaq = (Float*) malloc(gridDim.x * sizeof(Float) );
  if(h_partial_plaq == NULL) errorQuda("Error allocating memory for host partial plaquette");
hipMalloc((void**)&d_partial_plaq, gridDim.x * sizeof(Float));
#ifdef TIMING_REPORT
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
#endif
if( typeid(Float) == typeid(float) )
hipLaunchKernelGGL(( calculatePlaq_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, gaugeTexPlaq,(float*) d_partial_plaq);
else if(typeid(Float) == typeid(double))
hipLaunchKernelGGL(( calculatePlaq_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, gaugeTexPlaq,(double*) d_partial_plaq);
#ifdef TIMING_REPORT
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printfQuda("Elapsed time for plaquette kernel is %f ms\n",elapsedTime);
#endif
hipMemcpy(h_partial_plaq, d_partial_plaq , gridDim.x * sizeof(Float) , hipMemcpyDeviceToHost);
for(int i = 0 ; i < gridDim.x ; i++)
plaquette += h_partial_plaq[i];
free(h_partial_plaq);
hipFree(d_partial_plaq);
checkCudaError();
int rc;
if(typeid(Float) == typeid(double))
rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
else if( typeid(Float) == typeid(float) )
rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_FLOAT , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for plaquette");
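  // normalize by the global volume, the number of colors (trace) and the 6 plaquette planes per site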
return globalPlaquette/(GK_totalVolume*GK_nColor*6);
}
void quda::run_calculatePlaq_kernel(hipTextureObject_t gaugeTexPlaq,
int precision){
if(precision == 4){
float plaq = calculatePlaq_kernel<float>(gaugeTexPlaq);
printfQuda("Calculated plaquette in single precision is %f\n",plaq);
}
else if(precision == 8){
double plaq = calculatePlaq_kernel<double>(gaugeTexPlaq);
printfQuda("Calculated plaquette in double precision is %lf\n",plaq);
}
else{
errorQuda("Precision not supported\n");
}
}
template<typename Float>
static void gaussianSmearing_kernel(void* out,
hipTextureObject_t vecInTex,
hipTextureObject_t gaugeTex){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
#ifdef TIMING_REPORT
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
#endif
if( typeid(Float) == typeid(float) )
hipLaunchKernelGGL(( gaussianSmearing_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out, vecInTex, gaugeTex);
else if(typeid(Float) == typeid(double))
hipLaunchKernelGGL(( gaussianSmearing_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out, vecInTex, gaugeTex);
checkCudaError();
#ifdef TIMING_REPORT
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printfQuda("Elapsed time for 1 step in gaussian smearing is %f ms\n",elapsedTime);
#endif
}
void quda::run_GaussianSmearing(void* out,
hipTextureObject_t vecInTex,
hipTextureObject_t gaugeTex,
int precision){
if(precision == 4){
gaussianSmearing_kernel<float>(out,vecInTex,gaugeTex);
}
else if(precision == 8){
gaussianSmearing_kernel<double>(out,vecInTex,gaugeTex);
}
else{
errorQuda("Precision not supported\n");
}
}
void quda::run_UploadToCuda(void* in,ColorSpinorField &qudaVec, int precision, bool isEven){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if( qudaVec.SiteSubset() == QUDA_PARITY_SITE_SUBSET ){
if( isEven ){
if(precision == 4){
hipLaunchKernelGGL(( uploadToCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) in,(double2*) qudaVec.V(), NULL );
}
else if(precision == 8){
hipLaunchKernelGGL(( uploadToCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) in,(double2*) qudaVec.V(), NULL );
}
else{
errorQuda("Precision not supported\n");
}
}
else{
if(precision == 4){
hipLaunchKernelGGL(( uploadToCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) in, NULL,(double2*) qudaVec.V() );
}
else if(precision == 8){
hipLaunchKernelGGL(( uploadToCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) in, NULL,(double2*) qudaVec.V());
}
else{
errorQuda("Precision not supported\n");
}
}
}
else{
// printfQuda("### Uploading to QUDA both even and odd sites\n");
if(precision == 4){
hipLaunchKernelGGL(( uploadToCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) in,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else if(precision == 8){
hipLaunchKernelGGL(( uploadToCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) in,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else{
errorQuda("Precision not supported\n");
}
}
checkCudaError();
}
void quda::run_DownloadFromCuda(void* out,ColorSpinorField &qudaVec, int precision, bool isEven){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if( qudaVec.SiteSubset() == QUDA_PARITY_SITE_SUBSET ){
if( isEven ){
if(precision == 4){
hipLaunchKernelGGL(( downloadFromCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out,(double2*) qudaVec.V(), NULL );
}
else if(precision == 8){
hipLaunchKernelGGL(( downloadFromCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out,(double2*) qudaVec.V(), NULL );
}
else{
errorQuda("Precision not supported\n");
}
}
else{
if(precision == 4){
hipLaunchKernelGGL(( downloadFromCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out, NULL,(double2*) qudaVec.V() );
}
else if(precision == 8){
hipLaunchKernelGGL(( downloadFromCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out, NULL,(double2*) qudaVec.V());
}
else{
errorQuda("Precision not supported\n");
}
}
}
else{
// printfQuda("### Downloading from QUDA both even and odd sites\n");
if(precision == 4){
hipLaunchKernelGGL(( downloadFromCuda_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else if(precision == 8){
hipLaunchKernelGGL(( downloadFromCuda_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else{
errorQuda("Precision not supported\n");
}
}
checkCudaError();
}
void quda::run_ScaleVector(double a, void* inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( scaleVector_kernel<float,float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float) a, (float2*) inOut);
}
else if(precision == 8){
hipLaunchKernelGGL(( scaleVector_kernel<double,double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double) a, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
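// Meson two-point contractions. In POSITION_SPACE the kernel returns one complex value per
// spatial site for 2 flavor combinations x 10 interpolators, which is copied directly into
// corr. In MOMENTUM_SPACE it returns per-block partial sums for each momentum, which are
// summed over blocks on the host before being written into corr.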
template<typename Float2,typename Float>
static void contractMesons_kernel(hipTextureObject_t texProp1,
hipTextureObject_t texProp2,
Float (*corr)[2][10],
int it, int isource,
CORR_SPACE CorrSpace){
if( typeid(Float2) != typeid(float2) ) errorQuda("Unsupported precision for Meson 2pt Contraction kernels!\n");
int SpVol = GK_localVolume/GK_localL[3];
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (SpVol + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
long int alloc_size = blockDim.x * gridDim.x; // That's basically local spatial volume
h_partial_block = (Float*)malloc(alloc_size*2*10*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractMesons_kernel: Cannot allocate host block.\n");
hipMalloc((void**)&d_partial_block, alloc_size*2*10*2*sizeof(Float));
checkCudaError();
hipLaunchKernelGGL(( contractMesons_kernel_PosSpace_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2]);
checkCudaError();
hipMemcpy(h_partial_block , d_partial_block , alloc_size*2*10*2*sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
//-C.K. Copy host block into corr buffer
for(int pt = 0; pt < 2 ; pt++){
for( int mes = 0; mes < 10; mes++){
for(int sv = 0; sv < SpVol ; sv++){
corr[ 0 + 2*sv + 2*SpVol*it ][pt][mes] = h_partial_block[ 0 + 2*sv + 2*SpVol*mes + 2*SpVol*10*pt ];
corr[ 1 + 2*sv + 2*SpVol*it ][pt][mes] = h_partial_block[ 1 + 2*sv + 2*SpVol*mes + 2*SpVol*10*pt ];
}
}
}
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*2*10*gridDim.x*2*sizeof(Float));
    if(h_partial_block == NULL) errorQuda("contractMesons_kernel: Cannot allocate host block.\n");
hipMalloc((void**)&d_partial_block, GK_Nmoms*2*10*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*2*10*2,sizeof(Float));
hipLaunchKernelGGL(( contractMesons_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2]);
checkCudaError();
hipMemcpy(h_partial_block , d_partial_block , GK_Nmoms*2*10*gridDim.x*2 * sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int ip = 0 ; ip < 10 ; ip++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*2*10*2 + iu*10*2 + ip*2 + 0] += h_partial_block[imom*2*10*gridDim.x*2 + iu*10*gridDim.x*2 + ip*gridDim.x*2 + i*2 + 0];
reduction[imom*2*10*2 + iu*10*2 + ip*2 + 1] += h_partial_block[imom*2*10*gridDim.x*2 + iu*10*gridDim.x*2 + ip*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int ip = 0 ; ip < 10 ; ip++){
corr[it*GK_Nmoms*2 + imom*2 + 0][iu][ip] = reduction[imom*2*10*2 + iu*10*2 + ip*2 + 0];
corr[it*GK_Nmoms*2 + imom*2 + 1][iu][ip] = reduction[imom*2*10*2 + iu*10*2 + ip*2 + 1];
}
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
free(reduction);
}//-CorrSpace else
else errorQuda("contractMesons_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
void quda::run_contractMesons(hipTextureObject_t texProp1,
hipTextureObject_t texProp2,
void* corr, int it, int isource,
int precision, CORR_SPACE CorrSpace){
if (CorrSpace==POSITION_SPACE) hipFuncSetCacheConfig(contractMesons_kernel_PosSpace_float,hipFuncCachePreferShared);
else if(CorrSpace==MOMENTUM_SPACE) hipFuncSetCacheConfig(contractMesons_kernel_float ,hipFuncCachePreferShared);
else errorQuda("run_contractMesons: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
checkCudaError();
if(precision == 4) contractMesons_kernel<float2,float>(texProp1,texProp2,(float(*)[2][10]) corr,it, isource, CorrSpace);
else if(precision == 8) errorQuda("Double precision in Meson 2pt Contractions unsupported!!!\n");
else errorQuda("run_contractMesons: Precision %d not supported\n",precision);
}
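// Baryon two-point contractions follow the same pattern as the mesons, except that the
// kernel is launched once per interpolator combination ip (10 in total) and keeps the
// source/sink Dirac indices open, hence the extra 4x4 factor in all buffers.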
template<typename Float2,typename Float>
static void contractBaryons_kernel(hipTextureObject_t texProp1,
hipTextureObject_t texProp2,
Float (*corr)[2][10][4][4],
int it, int isource,
CORR_SPACE CorrSpace){
if( typeid(Float2) != typeid(float2) ) errorQuda("Unsupported precision for Baryon 2pt Contraction kernels!\n");
int SpVol = GK_localVolume/GK_localL[3];
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (SpVol + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
long int alloc_size = blockDim.x * gridDim.x; // That's basically local spatial volume
h_partial_block = (Float*)malloc(alloc_size*2*4*4*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractBaryons_kernel: Cannot allocate host block.\n");
hipMalloc((void**)&d_partial_block, alloc_size*2*4*4*2*sizeof(Float));
checkCudaError();
for(int ip = 0 ; ip < 10 ; ip++){
hipLaunchKernelGGL(( contractBaryons_kernel_PosSpace_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2],ip);
checkCudaError();
hipMemcpy(h_partial_block , d_partial_block , alloc_size*2*4*4*2*sizeof(Float) , hipMemcpyDeviceToHost); //-C.K. Copy device block into host block
checkCudaError();
//-C.K. Copy host block into corr buffer
for(int pt = 0; pt < 2 ; pt++){
for(int ga = 0 ; ga < 4 ; ga++){
for(int gap = 0; gap < 4 ; gap++){
for(int sv = 0; sv < SpVol ; sv++){
corr[ 0 + 2*sv + 2*SpVol*it ][pt][ip][ga][gap] = h_partial_block[ 0 + 2*sv + 2*SpVol*gap + 2*SpVol*4*ga + 2*SpVol*4*4*pt ];
corr[ 1 + 2*sv + 2*SpVol*it ][pt][ip][ga][gap] = h_partial_block[ 1 + 2*sv + 2*SpVol*gap + 2*SpVol*4*ga + 2*SpVol*4*4*pt ];
}}}
}
}//-ip
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*2*4*4*gridDim.x*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractBaryons_kernel: Cannot allocate host block.\n");
hipMalloc((void**)&d_partial_block, GK_Nmoms*2*4*4*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*2*4*4*2,sizeof(Float));
for(int ip = 0 ; ip < 10 ; ip++){
hipLaunchKernelGGL(( contractBaryons_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2],ip);
checkCudaError();
hipMemcpy(h_partial_block , d_partial_block , GK_Nmoms*2*4*4*gridDim.x*2 * sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*2*4*4*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gammap = 0 ; gammap < 4 ; gammap++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 0] += h_partial_block[imom*2*4*4*gridDim.x*2 + iu*4*4*gridDim.x*2 + gamma*4*gridDim.x*2 + gammap*gridDim.x*2 + i*2 + 0];
reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 1] += h_partial_block[imom*2*4*4*gridDim.x*2 + iu*4*4*gridDim.x*2 + gamma*4*gridDim.x*2 + gammap*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gammap = 0 ; gammap < 4 ; gammap++){
corr[it*GK_Nmoms*2 + imom*2 + 0][iu][ip][gamma][gammap] = reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 0];
corr[it*GK_Nmoms*2 + imom*2 + 1][iu][ip][gamma][gammap] = reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 1];
}
}//-ip
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
free(reduction);
}//-CorrSpace else
else errorQuda("contractBaryons_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
void quda::run_contractBaryons(hipTextureObject_t texProp1,
hipTextureObject_t texProp2,
void* corr, int it,
int isource, int precision,
CORR_SPACE CorrSpace){
if (CorrSpace==POSITION_SPACE) hipFuncSetCacheConfig(contractBaryons_kernel_PosSpace_float,hipFuncCachePreferShared);
else if(CorrSpace==MOMENTUM_SPACE) hipFuncSetCacheConfig(contractBaryons_kernel_float ,hipFuncCachePreferShared);
else errorQuda("run_contractBaryons: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
checkCudaError();
if(precision == 4) contractBaryons_kernel<float2,float>(texProp1,texProp2,(float(*)[2][10][4][4]) corr,it, isource, CorrSpace);
else if(precision == 8) errorQuda("Double precision in Baryon 2pt Contractions unsupported!!!\n");
else errorQuda("run_contractBaryons: Precision %d not supported\n",precision);
}
void quda::run_rotateToPhysicalBase(void* inOut, int sign, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( rotateToPhysicalBase_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) inOut,sign);
}
else if(precision == 8){
hipLaunchKernelGGL(( rotateToPhysicalBase_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) inOut,sign);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_castDoubleToFloat(void *out, void *in){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( castDoubleToFloat_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out, (double2*) in);
checkCudaError();
}
void quda::run_castFloatToDouble(void *out, void *in){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( castFloatToDouble_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out, (float2*) in);
checkCudaError();
}
void quda::run_conjugate_vector(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( conjugate_vector_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) inOut);
}
else if(precision == 8){
hipLaunchKernelGGL(( conjugate_vector_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_apply_gamma5_vector(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( apply_gamma5_vector_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) inOut);
}
else if(precision == 8){
hipLaunchKernelGGL(( apply_gamma5_vector_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_conjugate_propagator(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( conjugate_propagator_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) inOut);
}
else if(precision == 8){
hipLaunchKernelGGL(( conjugate_propagator_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_apply_gamma5_propagator(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
hipLaunchKernelGGL(( apply_gamma5_propagator_kernel<float2>), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) inOut);
}
else if(precision == 8){
hipLaunchKernelGGL(( apply_gamma5_propagator_kernel<double2>), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
template<typename Float>
static void seqSourceFixSinkPart1_kernel(void* out, int timeslice,
hipTextureObject_t tex1,
hipTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
if( typeid(Float) == typeid(float) )
hipLaunchKernelGGL(( seqSourceFixSinkPart1_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
else if(typeid(Float) == typeid(double))
hipLaunchKernelGGL(( seqSourceFixSinkPart1_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
checkCudaError();
}
template<typename Float>
static void seqSourceFixSinkPart2_kernel(void* out, int timeslice,
hipTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
if( typeid(Float) == typeid(float) )
hipLaunchKernelGGL(( seqSourceFixSinkPart2_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
else if(typeid(Float) == typeid(double))
hipLaunchKernelGGL(( seqSourceFixSinkPart2_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
checkCudaError();
}
void quda::run_seqSourceFixSinkPart1(void* out, int timeslice,
hipTextureObject_t tex1,
hipTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE,
int precision){
if(precision == 4){
seqSourceFixSinkPart1_kernel<float>(out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
}
else if(precision == 8){
seqSourceFixSinkPart1_kernel<double>(out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
}
else{
errorQuda("Precision not supported\n");
}
}
void quda::run_seqSourceFixSinkPart2(void* out, int timeslice,
hipTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE,
int precision){
if(precision == 4){
seqSourceFixSinkPart2_kernel<float>(out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
}
else if(precision == 8){
seqSourceFixSinkPart2_kernel<double>(out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
}
else{
errorQuda("Precision not supported\n");
}
}
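// Fixed-sink three-point contractions. Three classes of insertion operators are computed:
// ultra-local (16 Gamma structures), the conserved/Noether vector current (4 directions),
// and one-derivative operators (16 Gamma structures for each of the 4 directions).
// POSITION_SPACE copies the per-site device results straight into the corrThp_* buffers,
// while MOMENTUM_SPACE reduces per-block partial sums on the host, as above.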
template<typename Float2,typename Float>
static void fixSinkContractions_kernel(void* corrThp_local,
void* corrThp_noether,
void* corrThp_oneD,
hipTextureObject_t fwdTex,
hipTextureObject_t seqTex,
hipTextureObject_t gaugeTex,
WHICHPARTICLE PARTICLE,
int partflag, int itime,
int isource, CORR_SPACE CorrSpace){
int SpVol = GK_localVolume/GK_localL[3];
int lV = GK_localVolume;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
size_t alloc_buf;
size_t copy_buf;
//- Ultra-local operators
alloc_buf = blockDim.x * gridDim.x * 16 * 2 * sizeof(Float);
copy_buf = SpVol * 16 * 2 * sizeof(Float);
hipMalloc((void**)&d_partial_block, alloc_buf);
checkCudaError();
hipMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_local_kernel_PosSpace_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_local_kernel_PosSpace_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block into corrThp_local
hipMemcpy(&(((Float*)corrThp_local)[2*16*SpVol*itime]) , d_partial_block , copy_buf , hipMemcpyDeviceToHost);
checkCudaError();
//----------------------------------------------------------------------
//- One-derivative operators
for(int dir = 0 ; dir < 4 ; dir++){
hipMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_oneD_kernel_PosSpace_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_oneD_kernel_PosSpace_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block into corrThp_oneD for each dir
hipMemcpy(&(((Float*)corrThp_oneD)[2*16*SpVol*itime + 2*16*lV*dir]), d_partial_block , copy_buf , hipMemcpyDeviceToHost);
checkCudaError();
}//-dir
//----------------------------------------------------------------------
//- Noether, conserved current
//- it's better to reallocate the device block buffer here
hipFree(d_partial_block);
checkCudaError();
d_partial_block = NULL;
alloc_buf = blockDim.x * gridDim.x * 4 * 2 * sizeof(Float);
copy_buf = SpVol * 4 * 2 * sizeof(Float);
hipMalloc((void**)&d_partial_block, alloc_buf);
checkCudaError();
hipMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_noether_kernel_PosSpace_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_noether_kernel_PosSpace_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block to corrThp_noether
hipMemcpy(&(((Float*)corrThp_noether)[2*4*SpVol*itime]) , d_partial_block , copy_buf , hipMemcpyDeviceToHost);
checkCudaError();
hipFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*16*gridDim.x*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("fixSinkContractions_kernel: Cannot allocate host block.\n");
hipMalloc((void**)&d_partial_block, GK_Nmoms*16*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*16*2,sizeof(Float));
//- Ultra-local operators
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_local_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, PARTICLE,
partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_local_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, PARTICLE,
partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
hipMemcpy(h_partial_block , d_partial_block , GK_Nmoms*16*gridDim.x*2 * sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*16*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*16*2 + iop*2 + 0] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 0];
reduction[imom*16*2 + iop*2 + 1] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 1];
}
    for(int imom = 0 ; imom < GK_Nmoms ; imom++)
      for(int iop = 0 ; iop < 16 ; iop++){
	((Float*) corrThp_local)[itime*GK_Nmoms*16*2 + imom*16*2 + iop*2 + 0] = reduction[imom*16*2 + iop*2 + 0];
	((Float*) corrThp_local)[itime*GK_Nmoms*16*2 + imom*16*2 + iop*2 + 1] = reduction[imom*16*2 + iop*2 + 1];
      }
//---------------------------------------------------------------
//- Noether, conserved current
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_noether_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_noether_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
hipMemcpy(h_partial_block , d_partial_block , GK_Nmoms*4*gridDim.x*2 * sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*4*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int dir = 0 ; dir < 4 ; dir++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*4*2 + dir*2 + 0] += h_partial_block[imom*4*gridDim.x*2 + dir*gridDim.x*2 + i*2 + 0];
reduction[imom*4*2 + dir*2 + 1] += h_partial_block[imom*4*gridDim.x*2 + dir*gridDim.x*2 + i*2 + 1];
}
    for(int imom = 0 ; imom < GK_Nmoms ; imom++)
      for(int dir = 0 ; dir < 4 ; dir++){
	((Float*) corrThp_noether)[itime*GK_Nmoms*4*2 + imom*4*2 + dir*2 + 0] = reduction[imom*4*2 + dir*2 + 0];
	((Float*) corrThp_noether)[itime*GK_Nmoms*4*2 + imom*4*2 + dir*2 + 1] = reduction[imom*4*2 + dir*2 + 1];
      }
//---------------------------------------------------------------
//- One-derivative operators
for(int dir = 0 ; dir < 4 ; dir++){
if( typeid(Float2) == typeid(float2) )
hipLaunchKernelGGL(( fixSinkContractions_oneD_kernel_float), dim3(gridDim),dim3(blockDim), 0, 0, (float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
hipLaunchKernelGGL(( fixSinkContractions_oneD_kernel_double), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
hipMemcpy(h_partial_block , d_partial_block , GK_Nmoms*16*gridDim.x*2 * sizeof(Float) , hipMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*16*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*16*2 + iop*2 + 0] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 0];
reduction[imom*16*2 + iop*2 + 1] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 1];
}
      for(int imom = 0 ; imom < GK_Nmoms ; imom++)
	for(int iop = 0 ; iop < 16 ; iop++){
	  ((Float*) corrThp_oneD)[itime*GK_Nmoms*4*16*2 + imom*4*16*2 + dir*16*2 + iop*2 + 0] = reduction[imom*16*2 + iop*2 + 0];
	  ((Float*) corrThp_oneD)[itime*GK_Nmoms*4*16*2 + imom*4*16*2 + dir*16*2 + iop*2 + 1] = reduction[imom*16*2 + iop*2 + 1];
	}
}//-dir
//---------------------------------------------------------------
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
free(reduction);
}
else errorQuda("fixSinkContractions_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
void quda::run_fixSinkContractions(void* corrThp_local, void* corrThp_noether,
void* corrThp_oneD, hipTextureObject_t fwdTex,
hipTextureObject_t seqTex, hipTextureObject_t gaugeTex,
WHICHPARTICLE PARTICLE, int partflag, int it,
int isource, int precision, CORR_SPACE CorrSpace){
if(precision == 4)
fixSinkContractions_kernel<float2,float> (corrThp_local, corrThp_noether,
corrThp_oneD, fwdTex, seqTex,
gaugeTex, PARTICLE, partflag,
it, isource, CorrSpace);
else if(precision == 8)
fixSinkContractions_kernel<double2,double>(corrThp_local, corrThp_noether,
corrThp_oneD, fwdTex, seqTex,
gaugeTex, PARTICLE, partflag,
it, isource, CorrSpace);
else errorQuda("run_fixSinkContractions: Precision %d not supported\n",precision);
}
| fcb441da261eb92bff2d799dc0acb905a557bd80.cu | #include <qudaQKXTM.h>
#include <errno.h>
#include <mpi.h>
#include <limits>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <cuPrintf.cu>
#define THREADS_PER_BLOCK 64
#define PI 3.141592653589793
//#define TIMING_REPORT
using namespace quda;
extern Topology *default_topo;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 2: Constant References $$
/* block for device constants */
__constant__ bool c_dimBreak[4];
__constant__ int c_nColor;
__constant__ int c_nDim;
__constant__ int c_localL[4];
__constant__ int c_plusGhost[4];
__constant__ int c_minusGhost[4];
__constant__ int c_stride;
__constant__ int c_surface[4];
__constant__ int c_nSpin;
__constant__ double c_alphaAPE;
__constant__ double c_alphaGauss;
__constant__ int c_threads;
__constant__ int c_eps[6][3];
__constant__ int c_sgn_eps[6];
__constant__ int c_procPosition[4];
__constant__ int c_totalL[4];
__constant__ int c_Nmoms;
__constant__ short int c_moms[MAX_NMOMENTA][3];
__constant__ short int c_mesons_indices[10][16][4];
__constant__ short int c_NTN_indices[16][4];
__constant__ short int c_NTR_indices[64][6];
__constant__ short int c_RTN_indices[64][6];
__constant__ short int c_RTR_indices[256][8];
__constant__ short int c_Delta_indices[3][16][4];
__constant__ float c_mesons_values[10][16];
__constant__ float c_NTN_values[16];
__constant__ float c_NTR_values[64];
__constant__ float c_RTN_values[64];
__constant__ float c_RTR_values[256];
__constant__ float c_Delta_values[3][16];
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////
/* Block for global variables */
float GK_deviceMemory = 0.;
int GK_nColor;
int GK_nSpin;
int GK_nDim;
int GK_strideFull;
double GK_alphaAPE;
double GK_alphaGauss;
int GK_localVolume;
int GK_totalVolume;
int GK_nsmearAPE;
int GK_nsmearGauss;
bool GK_dimBreak[QUDAQKXTM_DIM];
int GK_localL[QUDAQKXTM_DIM];
int GK_totalL[QUDAQKXTM_DIM];
int GK_nProc[QUDAQKXTM_DIM];
int GK_plusGhost[QUDAQKXTM_DIM];
int GK_minusGhost[QUDAQKXTM_DIM];
int GK_surface3D[QUDAQKXTM_DIM];
bool GK_init_qudaQKXTM_flag = false;
int GK_Nsources;
int GK_sourcePosition[MAX_NSOURCES][QUDAQKXTM_DIM];
int GK_Nmoms;
short int GK_moms[MAX_NMOMENTA][3];
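// Precomputed index/value tables for the gamma-matrix structures entering the meson,
// NTN/NTR/RTN/RTR and Delta contractions; init_qudaQKXTM copies them into the constant
// memory arrays c_* declared above, where the contraction kernels read them.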
short int GK_mesons_indices[10][16][4] = {0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1,0,3,0,3,0,3,1,2,0,3,2,1,0,3,3,0,1,2,0,3,1,2,1,2,1,2,2,1,1,2,3,0,2,1,0,3,2,1,1,2,2,1,2,1,2,1,3,0,3,0,0,3,3,0,1,2,3,0,2,1,3,0,3,0,0,3,0,3,0,3,1,2,0,3,2,1,0,3,3,0,1,2,0,3,1,2,1,2,1,2,2,1,1,2,3,0,2,1,0,3,2,1,1,2,2,1,2,1,2,1,3,0,3,0,0,3,3,0,1,2,3,0,2,1,3,0,3,0,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,2,0,2,0,2,1,3,0,2,2,0,0,2,3,1,1,3,0,2,1,3,1,3,1,3,2,0,1,3,3,1,2,0,0,2,2,0,1,3,2,0,2,0,2,0,3,1,3,1,0,2,3,1,1,3,3,1,2,0,3,1,3,1};
float GK_mesons_values[10][16] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1};
short int GK_NTN_indices[16][4] = {0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2};
float GK_NTN_values[16] = {-1,1,-1,1,1,-1,1,-1,-1,1,-1,1,1,-1,1,-1};
short int GK_NTR_indices[64][6] = {0,1,0,3,0,2,0,1,0,3,1,3,0,1,0,3,2,0,0,1,0,3,3,1,0,1,1,2,0,2,0,1,1,2,1,3,0,1,1,2,2,0,0,1,1,2,3,1,0,1,2,1,0,2,0,1,2,1,1,3,0,1,2,1,2,0,0,1,2,1,3,1,0,1,3,0,0,2,0,1,3,0,1,3,0,1,3,0,2,0,0,1,3,0,3,1,1,0,0,3,0,2,1,0,0,3,1,3,1,0,0,3,2,0,1,0,0,3,3,1,1,0,1,2,0,2,1,0,1,2,1,3,1,0,1,2,2,0,1,0,1,2,3,1,1,0,2,1,0,2,1,0,2,1,1,3,1,0,2,1,2,0,1,0,2,1,3,1,1,0,3,0,0,2,1,0,3,0,1,3,1,0,3,0,2,0,1,0,3,0,3,1,2,3,0,3,0,2,2,3,0,3,1,3,2,3,0,3,2,0,2,3,0,3,3,1,2,3,1,2,0,2,2,3,1,2,1,3,2,3,1,2,2,0,2,3,1,2,3,1,2,3,2,1,0,2,2,3,2,1,1,3,2,3,2,1,2,0,2,3,2,1,3,1,2,3,3,0,0,2,2,3,3,0,1,3,2,3,3,0,2,0,2,3,3,0,3,1,3,2,0,3,0,2,3,2,0,3,1,3,3,2,0,3,2,0,3,2,0,3,3,1,3,2,1,2,0,2,3,2,1,2,1,3,3,2,1,2,2,0,3,2,1,2,3,1,3,2,2,1,0,2,3,2,2,1,1,3,3,2,2,1,2,0,3,2,2,1,3,1,3,2,3,0,0,2,3,2,3,0,1,3,3,2,3,0,2,0,3,2,3,0,3,1};
float GK_NTR_values[64] = {1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1};
short int GK_RTN_indices[64][6] = {0,3,0,1,0,2,0,3,0,1,1,3,0,3,0,1,2,0,0,3,0,1,3,1,0,3,1,0,0,2,0,3,1,0,1,3,0,3,1,0,2,0,0,3,1,0,3,1,0,3,2,3,0,2,0,3,2,3,1,3,0,3,2,3,2,0,0,3,2,3,3,1,0,3,3,2,0,2,0,3,3,2,1,3,0,3,3,2,2,0,0,3,3,2,3,1,1,2,0,1,0,2,1,2,0,1,1,3,1,2,0,1,2,0,1,2,0,1,3,1,1,2,1,0,0,2,1,2,1,0,1,3,1,2,1,0,2,0,1,2,1,0,3,1,1,2,2,3,0,2,1,2,2,3,1,3,1,2,2,3,2,0,1,2,2,3,3,1,1,2,3,2,0,2,1,2,3,2,1,3,1,2,3,2,2,0,1,2,3,2,3,1,2,1,0,1,0,2,2,1,0,1,1,3,2,1,0,1,2,0,2,1,0,1,3,1,2,1,1,0,0,2,2,1,1,0,1,3,2,1,1,0,2,0,2,1,1,0,3,1,2,1,2,3,0,2,2,1,2,3,1,3,2,1,2,3,2,0,2,1,2,3,3,1,2,1,3,2,0,2,2,1,3,2,1,3,2,1,3,2,2,0,2,1,3,2,3,1,3,0,0,1,0,2,3,0,0,1,1,3,3,0,0,1,2,0,3,0,0,1,3,1,3,0,1,0,0,2,3,0,1,0,1,3,3,0,1,0,2,0,3,0,1,0,3,1,3,0,2,3,0,2,3,0,2,3,1,3,3,0,2,3,2,0,3,0,2,3,3,1,3,0,3,2,0,2,3,0,3,2,1,3,3,0,3,2,2,0,3,0,3,2,3,1};
float GK_RTN_values[64] = {-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1};
short int GK_RTR_indices[256][8] = {0,3,0,3,0,2,0,2,0,3,0,3,0,2,1,3,0,3,0,3,0,2,2,0,0,3,0,3,0,2,3,1,0,3,0,3,1,3,0,2,0,3,0,3,1,3,1,3,0,3,0,3,1,3,2,0,0,3,0,3,1,3,3,1,0,3,0,3,2,0,0,2,0,3,0,3,2,0,1,3,0,3,0,3,2,0,2,0,0,3,0,3,2,0,3,1,0,3,0,3,3,1,0,2,0,3,0,3,3,1,1,3,0,3,0,3,3,1,2,0,0,3,0,3,3,1,3,1,0,3,1,2,0,2,0,2,0,3,1,2,0,2,1,3,0,3,1,2,0,2,2,0,0,3,1,2,0,2,3,1,0,3,1,2,1,3,0,2,0,3,1,2,1,3,1,3,0,3,1,2,1,3,2,0,0,3,1,2,1,3,3,1,0,3,1,2,2,0,0,2,0,3,1,2,2,0,1,3,0,3,1,2,2,0,2,0,0,3,1,2,2,0,3,1,0,3,1,2,3,1,0,2,0,3,1,2,3,1,1,3,0,3,1,2,3,1,2,0,0,3,1,2,3,1,3,1,0,3,2,1,0,2,0,2,0,3,2,1,0,2,1,3,0,3,2,1,0,2,2,0,0,3,2,1,0,2,3,1,0,3,2,1,1,3,0,2,0,3,2,1,1,3,1,3,0,3,2,1,1,3,2,0,0,3,2,1,1,3,3,1,0,3,2,1,2,0,0,2,0,3,2,1,2,0,1,3,0,3,2,1,2,0,2,0,0,3,2,1,2,0,3,1,0,3,2,1,3,1,0,2,0,3,2,1,3,1,1,3,0,3,2,1,3,1,2,0,0,3,2,1,3,1,3,1,0,3,3,0,0,2,0,2,0,3,3,0,0,2,1,3,0,3,3,0,0,2,2,0,0,3,3,0,0,2,3,1,0,3,3,0,1,3,0,2,0,3,3,0,1,3,1,3,0,3,3,0,1,3,2,0,0,3,3,0,1,3,3,1,0,3,3,0,2,0,0,2,0,3,3,0,2,0,1,3,0,3,3,0,2,0,2,0,0,3,3,0,2,0,3,1,0,3,3,0,3,1,0,2,0,3,3,0,3,1,1,3,0,3,3,0,3,1,2,0,0,3,3,0,3,1,3,1,1,2,0,3,0,2,0,2,1,2,0,3,0,2,1,3,1,2,0,3,0,2,2,0,1,2,0,3,0,2,3,1,1,2,0,3,1,3,0,2,1,2,0,3,1,3,1,3,1,2,0,3,1,3,2,0,1,2,0,3,1,3,3,1,1,2,0,3,2,0,0,2,1,2,0,3,2,0,1,3,1,2,0,3,2,0,2,0,1,2,0,3,2,0,3,1,1,2,0,3,3,1,0,2,1,2,0,3,3,1,1,3,1,2,0,3,3,1,2,0,1,2,0,3,3,1,3,1,1,2,1,2,0,2,0,2,1,2,1,2,0,2,1,3,1,2,1,2,0,2,2,0,1,2,1,2,0,2,3,1,1,2,1,2,1,3,0,2,1,2,1,2,1,3,1,3,1,2,1,2,1,3,2,0,1,2,1,2,1,3,3,1,1,2,1,2,2,0,0,2,1,2,1,2,2,0,1,3,1,2,1,2,2,0,2,0,1,2,1,2,2,0,3,1,1,2,1,2,3,1,0,2,1,2,1,2,3,1,1,3,1,2,1,2,3,1,2,0,1,2,1,2,3,1,3,1,1,2,2,1,0,2,0,2,1,2,2,1,0,2,1,3,1,2,2,1,0,2,2,0,1,2,2,1,0,2,3,1,1,2,2,1,1,3,0,2,1,2,2,1,1,3,1,3,1,2,2,1,1,3,2,0,1,2,2,1,1,3,3,1,1,2,2,1,2,0,0,2,1,2,2,1,2,0,1,3,1,2,2,1,2,0,2,0,1,2,2,1,2,0,3,1,1,2,2,1,3,1,0,2,1,2,2,1,3,1,1,3,1,2,2,1,3,1,2,0,1,2,2,1,3,1,3,1,1,2,3,0,0,2,0,2,1,2,3,0,0,2,1,3,1,2,3,0,0,2,2,0,1,2,3,0,0,2,3,1,1,2,3,0,1,3,0,2,1,2,3,0,1,3,1,3,1,2,3,0,1,3,2,0,1,2,3,0,1,3,3,1,1,2,3,0,2,0,0,2,1,2,3,0,2,0,1,3,1,2,3,0,2,0,2,0,1,2,3,0,2,0,3,1,1,2,3,0,3,1,0,2,1,2,3,0,3,1,1,3,1,2,3,0,3,1,2,0,1,2,3,0,3,1,3,1,2,1,0,3,0,2,0,2,2,1,0,3,0,2,1,3,2,1,0,3,0,2,2,0,2,1,0,3,0,2,3,1,2,1,0,3,1,3,0,2,2,1,0,3,1,3,1,3,2,1,0,3,1,3,2,0,2,1,0,3,1,3,3,1,2,1,0,3,2,0,0,2,2,1,0,3,2,0,1,3,2,1,0,3,2,0,2,0,2,1,0,3,2,0,3,1,2,1,0,3,3,1,0,2,2,1,0,3,3,1,1,3,2,1,0,3,3,1,2,0,2,1,0,3,3,1,3,1,2,1,1,2,0,2,0,2,2,1,1,2,0,2,1,3,2,1,1,2,0,2,2,0,2,1,1,2,0,2,3,1,2,1,1,2,1,3,0,2,2,1,1,2,1,3,1,3,2,1,1,2,1,3,2,0,2,1,1,2,1,3,3,1,2,1,1,2,2,0,0,2,2,1,1,2,2,0,1,3,2,1,1,2,2,0,2,0,2,1,1,2,2,0,3,1,2,1,1,2,3,1,0,2,2,1,1,2,3,1,1,3,2,1,1,2,3,1,2,0,2,1,1,2,3,1,3,1,2,1,2,1,0,2,0,2,2,1,2,1,0,2,1,3,2,1,2,1,0,2,2,0,2,1,2,1,0,2,3,1,2,1,2,1,1,3,0,2,2,1,2,1,1,3,1,3,2,1,2,1,1,3,2,0,2,1,2,1,1,3,3,1,2,1,2,1,2,0,0,2,2,1,2,1,2,0,1,3,2,1,2,1,2,0,2,0,2,1,2,1,2,0,3,1,2,1,2,1,3,1,0,2,2,1,2,1,3,1,1,3,2,1,2,1,3,1,2,0,2,1,2,1,3,1,3,1,2,1,3,0,0,2,0,2,2,1,3,0,0,2,1,3,2,1,3,0,0,2,2,0,2,1,3,0,0,2,3,1,2,1,3,0,1,3,0,2,2,1,3,0,1,3,1,3,2,1,3,0,1,3,2,0,2,1,3,0,1,3,3,1,2,1,3,0,2,0,0,2,2,1,3,0,2,0,1,3,2,1,3,0,2,0,2,0,2,1,3,0,2,0,3,1,2,1,3,0,3,1,0,2,2,1,3,0,3,1,1,3,2,1,3,0,3,1,2,0,2,1,3,0,3,1,3,1,3,0,0,3,0,2,0,2,3,0,0,3,0,2,1,3,3,0,0,3,0,2,2,0,3,0,0,3,0,2,3,1,3,0,0,3,1,3,0,2,3,0,0,3,1,3,1,3,3,0,0,3,1,3,2,0,3,0,0,3,1,3,3,1,3,0,0,3,2,0,0,2,3,0,0,3,2,0,1,3,3,0,0,3,2,0,2,0,3,0,0,3,2,0,3,1,3,0,0,3,3,1,0,2,3,0,0,3,3,1,1,3,3,0,0,3,3,1,2,0,3,0,0,3,3,1,3,1,3,0,1,2,0,2,0,2,3,0,1,2,0,2,1,3,3,0,1,2,0,2,2,0,3,0,1,2,0,2,3,1,3,0,1,2,1,3,0,2,3,0,1,2,1,3,1,3,3,0,1,2,1,3,2,0,3,0,1,2,1,3,3,1,3,0,1,2,2,0,0,2,3,0,1,2,2,0,1,3,3,0,1,2,2,0,2,0,3,0,1,2,2,0,3,1
,3,0,1,2,3,1,0,2,3,0,1,2,3,1,1,3,3,0,1,2,3,1,2,0,3,0,1,2,3,1,3,1,3,0,2,1,0,2,0,2,3,0,2,1,0,2,1,3,3,0,2,1,0,2,2,0,3,0,2,1,0,2,3,1,3,0,2,1,1,3,0,2,3,0,2,1,1,3,1,3,3,0,2,1,1,3,2,0,3,0,2,1,1,3,3,1,3,0,2,1,2,0,0,2,3,0,2,1,2,0,1,3,3,0,2,1,2,0,2,0,3,0,2,1,2,0,3,1,3,0,2,1,3,1,0,2,3,0,2,1,3,1,1,3,3,0,2,1,3,1,2,0,3,0,2,1,3,1,3,1,3,0,3,0,0,2,0,2,3,0,3,0,0,2,1,3,3,0,3,0,0,2,2,0,3,0,3,0,0,2,3,1,3,0,3,0,1,3,0,2,3,0,3,0,1,3,1,3,3,0,3,0,1,3,2,0,3,0,3,0,1,3,3,1,3,0,3,0,2,0,0,2,3,0,3,0,2,0,1,3,3,0,3,0,2,0,2,0,3,0,3,0,2,0,3,1,3,0,3,0,3,1,0,2,3,0,3,0,3,1,1,3,3,0,3,0,3,1,2,0,3,0,3,0,3,1,3,1};
float GK_RTR_values[256] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
short int GK_Delta_indices[3][16][4] = {0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,0,0,0,0,0,1,1,0,0,2,2,0,0,3,3,1,1,0,0,1,1,1,1,1,1,2,2,1,1,3,3,2,2,0,0,2,2,1,1,2,2,2,2,2,2,3,3,3,3,0,0,3,3,1,1,3,3,2,2,3,3,3,3,0,1,0,1,0,1,1,0,0,1,2,3,0,1,3,2,1,0,0,1,1,0,1,0,1,0,2,3,1,0,3,2,2,3,0,1,2,3,1,0,2,3,2,3,2,3,3,2,3,2,0,1,3,2,1,0,3,2,2,3,3,2,3,2};
float GK_Delta_values[3][16] = {1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1};
// global variables used for the MPI groups and communicators
MPI_Group GK_fullGroup , GK_spaceGroup , GK_timeGroup;
MPI_Comm GK_spaceComm , GK_timeComm;
int GK_localRank;
int GK_localSize;
int GK_timeRank;
int GK_timeSize;
//////////////////////////////////////////////////
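// Enumerate all integer momentum vectors (nx,ny,nz) with nx^2+ny^2+nz^2 <= Q_sq and store
// them in GK_moms; the resulting count is kept in GK_Nmoms.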
static void createMomenta(int Q_sq){
int counter=0;
for(int iQ = 0 ; iQ <= Q_sq ; iQ++){
for(int nx = iQ ; nx >= -iQ ; nx--){
for(int ny = iQ ; ny >= -iQ ; ny--){
for(int nz = iQ ; nz >= -iQ ; nz--){
if( nx*nx + ny*ny + nz*nz == iQ ){
GK_moms[counter][0] = nx;
GK_moms[counter][1] = ny;
GK_moms[counter][2] = nz;
counter++;
}
}
}
}
}
  if(counter > MAX_NMOMENTA) errorQuda("Error: exceeded the maximum number of momenta\n");
GK_Nmoms=counter;
}
void quda::init_qudaQKXTM(qudaQKXTMinfo *info){
if(GK_init_qudaQKXTM_flag == false){
GK_nColor = 3;
GK_nSpin = 4;
GK_nDim = QUDAQKXTM_DIM;
GK_alphaAPE = info->alphaAPE;
GK_alphaGauss = info->alphaGauss;
GK_nsmearAPE = info->nsmearAPE;
GK_nsmearGauss = info->nsmearGauss;
createMomenta(info->Q_sq);
    // from here on, quantities depend on the lattice size and on how it is split across processes
for(int i = 0 ; i < GK_nDim ; i++)
GK_nProc[i] = comm_dim(i);
    for(int i = 0 ; i < GK_nDim ; i++){ // local and total lattice extents
GK_localL[i] = info->lL[i];
GK_totalL[i] = GK_nProc[i] * GK_localL[i];
}
GK_localVolume = 1;
GK_totalVolume = 1;
for(int i = 0 ; i < GK_nDim ; i++){
GK_localVolume *= GK_localL[i];
GK_totalVolume *= GK_totalL[i];
}
GK_strideFull = GK_localVolume;
for (int i=0; i<GK_nDim; i++) {
GK_surface3D[i] = 1;
for (int j=0; j<GK_nDim; j++) {
if (i==j) continue;
GK_surface3D[i] *= GK_localL[j];
}
}
for(int i = 0 ; i < GK_nDim ; i++)
if( GK_localL[i] == GK_totalL[i] )
GK_surface3D[i] = 0;
for(int i = 0 ; i < GK_nDim ; i++){
GK_plusGhost[i] =0;
GK_minusGhost[i] = 0;
}
#ifdef MULTI_GPU
int lastIndex = GK_localVolume;
for(int i = 0 ; i < GK_nDim ; i++)
if( GK_localL[i] < GK_totalL[i] ){
GK_plusGhost[i] = lastIndex ;
GK_minusGhost[i] = lastIndex + GK_surface3D[i];
lastIndex += 2*GK_surface3D[i];
}
#endif
for(int i = 0 ; i < GK_nDim ; i++){
if( GK_localL[i] < GK_totalL[i])
GK_dimBreak[i] = true;
else
GK_dimBreak[i] = false;
}
const int eps[6][3]=
{
{0,1,2},
{2,0,1},
{1,2,0},
{2,1,0},
{0,2,1},
{1,0,2}
};
const int sgn_eps[6]=
{
+1,+1,+1,-1,-1,-1
};
int procPosition[4];
for(int i= 0 ; i < 4 ; i++)
procPosition[i] = comm_coords(default_topo)[i];
    // number of source positions and their coordinates, as provided by the interface
GK_Nsources = info->Nsources;
    if(GK_Nsources > MAX_NSOURCES) errorQuda("Error: you exceeded the maximum number of source positions\n");
for(int is = 0 ; is < GK_Nsources ; is++)
for(int i = 0 ; i < 4 ; i++)
GK_sourcePosition[is][i] = info->sourcePosition[is][i];
    // initialization also defines the device constants
cudaMemcpyToSymbol(c_nColor, &GK_nColor, sizeof(int) );
cudaMemcpyToSymbol(c_nSpin, &GK_nSpin, sizeof(int) );
cudaMemcpyToSymbol(c_nDim, &GK_nDim, sizeof(int) );
cudaMemcpyToSymbol(c_stride, &GK_strideFull, sizeof(int) );
cudaMemcpyToSymbol(c_alphaAPE, &GK_alphaAPE , sizeof(double) );
cudaMemcpyToSymbol(c_alphaGauss, &GK_alphaGauss , sizeof(double) );
    cudaMemcpyToSymbol(c_threads , &GK_localVolume , sizeof(int) ); // c_threads and GK_localVolume are ints, so copy sizeof(int) bytes
cudaMemcpyToSymbol(c_dimBreak , GK_dimBreak , QUDAQKXTM_DIM*sizeof(bool) );
cudaMemcpyToSymbol(c_localL , GK_localL , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_totalL , GK_totalL , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_plusGhost , GK_plusGhost , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_minusGhost , GK_minusGhost , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_surface , GK_surface3D , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_eps, &(eps[0][0]) , 6*3*sizeof(int) );
cudaMemcpyToSymbol(c_sgn_eps, sgn_eps , 6*sizeof(int) );
cudaMemcpyToSymbol(c_procPosition, procPosition, QUDAQKXTM_DIM*sizeof(int));
cudaMemcpyToSymbol(c_Nmoms, &GK_Nmoms, sizeof(int));
cudaMemcpyToSymbol(c_moms, GK_moms, MAX_NMOMENTA*3*sizeof(short int));
cudaMemcpyToSymbol(c_mesons_indices,GK_mesons_indices,10*16*4*sizeof(short int));
cudaMemcpyToSymbol(c_NTN_indices,GK_NTN_indices,16*4*sizeof(short int));
cudaMemcpyToSymbol(c_NTR_indices,GK_NTR_indices,64*6*sizeof(short int));
cudaMemcpyToSymbol(c_RTN_indices,GK_RTN_indices,64*6*sizeof(short int));
cudaMemcpyToSymbol(c_RTR_indices,GK_RTR_indices,256*8*sizeof(short int));
cudaMemcpyToSymbol(c_Delta_indices,GK_Delta_indices,3*16*4*sizeof(short int));
cudaMemcpyToSymbol(c_mesons_values,GK_mesons_values,10*16*sizeof(float));
cudaMemcpyToSymbol(c_NTN_values,GK_NTN_values,16*sizeof(float));
cudaMemcpyToSymbol(c_NTR_values,GK_NTR_values,64*sizeof(float));
cudaMemcpyToSymbol(c_RTN_values,GK_RTN_values,64*sizeof(float));
cudaMemcpyToSymbol(c_RTR_values,GK_RTR_values,256*sizeof(float));
cudaMemcpyToSymbol(c_Delta_values,GK_Delta_values,3*16*sizeof(float));
checkCudaError();
    // create process groups so that MPI reductions run only over the spatial sub-lattice
MPI_Comm_group(MPI_COMM_WORLD, &GK_fullGroup);
int space3D_proc;
space3D_proc = GK_nProc[0] * GK_nProc[1] * GK_nProc[2];
int *ranks = (int*) malloc(space3D_proc*sizeof(int));
for(int i= 0 ; i < space3D_proc ; i++)
ranks[i] = comm_coords(default_topo)[3] + GK_nProc[3]*i;
// for(int i= 0 ; i < space3D_proc ; i++)
// printf("%d (%d,%d,%d,%d)\n",comm_rank(),comm_coords(default_topo)[0],comm_coords(default_topo)[1],comm_coords(default_topo)[2],comm_coords(default_topo)[3]);
// for(int i= 0 ; i < space3D_proc ; i++)
//printf("%d %d\n",comm_rank(),ranks[i]);
MPI_Group_incl(GK_fullGroup,space3D_proc,ranks,&GK_spaceGroup);
MPI_Group_rank(GK_spaceGroup,&GK_localRank);
MPI_Group_size(GK_spaceGroup,&GK_localSize);
MPI_Comm_create(MPI_COMM_WORLD, GK_spaceGroup , &GK_spaceComm);
//if(GK_spaceComm == MPI_COMM_NULL) printf("NULL %d\n",comm_rank());
//exit(-1);
    // create the group of processes along the time direction, used for MPI gathers
int *ranksTime = (int*) malloc(GK_nProc[3]*sizeof(int));
for(int i=0 ; i < GK_nProc[3] ; i++)
ranksTime[i] = i;
MPI_Group_incl(GK_fullGroup,GK_nProc[3], ranksTime, &GK_timeGroup);
MPI_Group_rank(GK_timeGroup, &GK_timeRank);
MPI_Group_size(GK_timeGroup, &GK_timeSize);
MPI_Comm_create(MPI_COMM_WORLD, GK_timeGroup, &GK_timeComm);
//////////////////////////////////////////////////////////////////////////////
free(ranks);
free(ranksTime);
GK_init_qudaQKXTM_flag = true;
printfQuda("qudaQKXTM has been initialized\n");
}
else{
printfQuda("???\n");
return;
}
}
void quda::printf_qudaQKXTM(){
  if(GK_init_qudaQKXTM_flag == false) errorQuda("You must call init_qudaQKXTM first");
printfQuda("Number of colors is %d\n",GK_nColor);
printfQuda("Number of spins is %d\n",GK_nSpin);
printfQuda("Number of dimensions is %d\n",GK_nDim);
printfQuda("Number of process in each direction is (x,y,z,t) %d x %d x %d x %d\n",GK_nProc[0],GK_nProc[1],GK_nProc[2],GK_nProc[3]);
printfQuda("Total lattice is (x,y,z,t) %d x %d x %d x %d\n",GK_totalL[0],GK_totalL[1],GK_totalL[2],GK_totalL[3]);
printfQuda("Local lattice is (x,y,z,t) %d x %d x %d x %d\n",GK_localL[0],GK_localL[1],GK_localL[2],GK_localL[3]);
printfQuda("Total volume is %d\n",GK_totalVolume);
printfQuda("Local volume is %d\n",GK_localVolume);
printfQuda("Surface is (x,y,z,t) ( %d , %d , %d , %d)\n",GK_surface3D[0],GK_surface3D[1],GK_surface3D[2],GK_surface3D[3]);
printfQuda("The plus Ghost points in directions (x,y,z,t) ( %d , %d , %d , %d )\n",GK_plusGhost[0],GK_plusGhost[1],GK_plusGhost[2],GK_plusGhost[3]);
printfQuda("The Minus Ghost points in directixons (x,y,z,t) ( %d , %d , %d , %d )\n",GK_minusGhost[0],GK_minusGhost[1],GK_minusGhost[2],GK_minusGhost[3]);
printfQuda("For APE smearing we use nsmear = %d , alpha = %lf\n",GK_nsmearAPE,GK_alphaAPE);
printfQuda("For Gauss smearing we use nsmear = %d , alpha = %lf\n",GK_nsmearGauss,GK_alphaGauss);
printfQuda("I got %d source positions to work on\n",GK_Nsources);
printfQuda("I got %d number of momenta to work on\n",GK_Nmoms);
}
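// Double-precision values cannot be fetched from textures directly, so a double2 is read
// as an int4 and reassembled with __hiloint2double.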
static __inline__ __device__ double2 fetch_double2(cudaTextureObject_t t, int i)
{
int4 v =tex1Dfetch<int4>(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
static __inline__ __device__ float2 fetch_float2(cudaTextureObject_t t, int i)
{
float2 v = tex1Dfetch<float2>(t,i);
return v;
}
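// Complex arithmetic helpers on the float2/double2 vector types: multiplication, scaling,
// addition, subtraction, conjugation and modulus.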
template<typename Float2>
__device__ inline Float2 operator*(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x*b.x - a.y*b.y;
res.y = a.x*b.y + a.y*b.x;
return res;
}
/*
template<typename Float2, typename Float>
__device__ inline Float2 operator*(const Float a , const Float2 b){
Float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
*/
__device__ inline float2 operator*(const float a , const float2 b){
float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const double a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator*(const int a , const Float2 b){
Float2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator+(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x + b.x;
res.y = a.y + b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 operator-(const Float2 a, const Float2 b){
Float2 res;
res.x = a.x - b.x;
res.y = a.y - b.y;
return res;
}
template<typename Float2>
__device__ inline Float2 conj(const Float2 a){
Float2 res;
res.x = a.x;
res.y = -a.y;
return res;
}
__device__ inline float norm(const float2 a){
float res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
__device__ inline double norm(const double2 a){
double res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
template<typename Float2>
__device__ inline Float2 get_Projector(Float2 projector[4][4],
WHICHPARTICLE PARTICLE,
WHICHPROJECTOR PID){
// important Projectors must be in twisted basis
#include <projectors_tm_base.h>
}
template<typename Float2>
__device__ inline Float2 get_Operator(Float2 gamma[4][4], int flag,
WHICHPARTICLE TESTPARTICLE,
int partFlag){
#include <gammas_tm_base.h>
}
#include <core_def.h>
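// Each __global__ kernel below is a thin precision wrapper: it #defines FLOAT2/FLOAT (and,
// where needed, the texture fetch helper) and then #includes the corresponding *_core.h
// body, producing separate float and double instantiations of the same kernel code.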
__global__ void calculatePlaq_kernel_double(cudaTextureObject_t gaugeTexPlaq,
double *partial_plaq){
#define FLOAT2 double2
#define FLOAT double
#define READGAUGE_FLOAT READGAUGE_double
#include <plaquette_core.h>
#undef FLOAT2
#undef FLOAT
#undef READGAUGE_FLOAT
}
__global__ void calculatePlaq_kernel_float(cudaTextureObject_t gaugeTexPlaq,
float *partial_plaq){
#define FLOAT2 float2
#define FLOAT float
#define READGAUGE_FLOAT READGAUGE_float
#include <plaquette_core.h>
#undef READGAUGE_FLOAT
#undef FLOAT2
#undef FLOAT
}
__global__ void gaussianSmearing_kernel_float(float2* out,
cudaTextureObject_t vecInTex,
cudaTextureObject_t gaugeTex ){
#define FLOAT2 float2
#define READGAUGE_FLOAT READGAUGE_float
#define READVECTOR_FLOAT READVECTOR_float
#include <Gauss_core.h>
#undef READGAUGE_FLOAT
#undef READVECTOR_FLOAT
#undef FLOAT2
}
__global__ void gaussianSmearing_kernel_double(double2* out,
cudaTextureObject_t vecInTex,
cudaTextureObject_t gaugeTex ){
#define FLOAT2 double2
#define READGAUGE_FLOAT READGAUGE_double
#define READVECTOR_FLOAT READVECTOR_double
#include <Gauss_core.h>
#undef READGAUGE_FLOAT
#undef READVECTOR_FLOAT
#undef FLOAT2
}
__global__ void contractMesons_kernel_float(float2* block,
cudaTextureObject_t prop1Tex,
cudaTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractMesons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractMesons_kernel_PosSpace_float(float2* block,
cudaTextureObject_t prop1Tex,
cudaTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractMesons_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractMesons_kernel_double(double2* block,
cudaTextureObject_t prop1Tex,
cudaTextureObject_t prop2Tex,
int it, int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <contractMesons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractBaryons_kernel_float(float2* block,
cudaTextureObject_t prop1Tex,
cudaTextureObject_t prop2Tex,
int it, int x0, int y0, int z0, int ip){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractBaryons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void contractBaryons_kernel_PosSpace_float(float2* block,
cudaTextureObject_t prop1Tex,
cudaTextureObject_t prop2Tex,
int it, int x0, int y0, int z0, int ip){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <contractBaryons_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
/*
__global__ void contractBaryons_kernel_double(double2* block, cudaTextureObject_t prop1Tex, cudaTextureObject_t prop2Tex,int it, int x0, int y0, int z0, int ip){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <contractBaryons_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
*/
__global__ void seqSourceFixSinkPart1_kernel_float(float2* out, int timeslice,
cudaTextureObject_t tex1,
cudaTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <seqSourceFixSinkPart1_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart2_kernel_float(float2* out,
int timeslice,
cudaTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <seqSourceFixSinkPart2_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart1_kernel_double(double2* out,
int timeslice,
cudaTextureObject_t tex1,
cudaTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <seqSourceFixSinkPart1_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void seqSourceFixSinkPart2_kernel_double(double2* out,
int timeslice,
cudaTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <seqSourceFixSinkPart2_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//- Fix Sink kernels, ultra-local
__global__ void fixSinkContractions_local_kernel_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_local_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_PosSpace_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_local_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_local_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_local_kernel_PosSpace_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_local_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
//- Fix Sink kernels, noether
__global__ void fixSinkContractions_noether_kernel_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_noether_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_PosSpace_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_noether_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_noether_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_noether_kernel_PosSpace_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int x0, int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_noether_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
//- Fix Sink kernels, one-derivative
__global__ void fixSinkContractions_oneD_kernel_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_oneD_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_PosSpace_float(float2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <fixSinkContractions_oneD_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_oneD_core.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
__global__ void fixSinkContractions_oneD_kernel_PosSpace_double(double2* block,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE TESTPARTICLE,
int partflag, int it,
int dir, int x0,
int y0, int z0){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <fixSinkContractions_oneD_core_PosSpace.h>
#undef FETCH_FLOAT2
#undef FLOAT2
#undef FLOAT
}
//-----------------------------------------------
template<typename Float, typename Float2>
__global__ void scaleVector_kernel(Float a, Float2* inOut){
#include <scaleVector_core.h>
}
template<typename Float2>
__global__ void uploadToCuda_kernel(Float2 *in, double2 *outEven, double2 *outOdd){
#include <uploadToCuda_core.h>
}
template<typename Float2>
__global__ void downloadFromCuda_kernel(Float2 *out, double2 *inEven, double2 *inOdd){
#include <downloadFromCuda_core.h>
}
template<typename Float2>
__global__ void rotateToPhysicalBase_kernel(Float2 *inOut, int sign){
#include <rotateToPhysicalBase_core.h>
}
__global__ void castDoubleToFloat_kernel(float2 *out, double2 *in){
#include <castDoubleToFloat_core.h>
}
__global__ void castFloatToDouble_kernel(double2 *out, float2 *in){
#include <castFloatToDouble_core.h>
}
template<typename Float2>
__global__ void conjugate_vector_kernel(Float2 *inOut){
#include <conjugate_vector_core.h>
}
template<typename Float2>
__global__ void apply_gamma5_vector_kernel(Float2 *inOut){
#include <apply_gamma5_vector_core.h>
}
template<typename Float2>
__global__ void conjugate_propagator_kernel(Float2 *inOut){
#include <conjugate_propagator_core.h>
}
template<typename Float2>
__global__ void apply_gamma5_propagator_kernel(Float2 *inOut){
#include <apply_gamma5_propagator_core.h>
}
template<typename Float>
static Float calculatePlaq_kernel(cudaTextureObject_t gaugeTexPlaq){
Float plaquette = 0.;
Float globalPlaquette = 0.;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
Float *h_partial_plaq = NULL;
Float *d_partial_plaq = NULL;
h_partial_plaq = (Float*) malloc(gridDim.x * sizeof(Float) );
if(h_partial_plaq == NULL) errorQuda("Error allocating memory for host partial plaq");
cudaMalloc((void**)&d_partial_plaq, gridDim.x * sizeof(Float));
#ifdef TIMING_REPORT
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
#endif
if( typeid(Float) == typeid(float) )
calculatePlaq_kernel_float<<<gridDim,blockDim>>>(gaugeTexPlaq,(float*) d_partial_plaq);
else if(typeid(Float) == typeid(double))
calculatePlaq_kernel_double<<<gridDim,blockDim>>>(gaugeTexPlaq,(double*) d_partial_plaq);
#ifdef TIMING_REPORT
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printfQuda("Elapsed time for plaquette kernel is %f ms\n",elapsedTime);
#endif
cudaMemcpy(h_partial_plaq, d_partial_plaq , gridDim.x * sizeof(Float) , cudaMemcpyDeviceToHost);
for(int i = 0 ; i < gridDim.x ; i++)
plaquette += h_partial_plaq[i];
free(h_partial_plaq);
cudaFree(d_partial_plaq);
checkCudaError();
int rc;
if(typeid(Float) == typeid(double))
rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
else if( typeid(Float) == typeid(float) )
rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_FLOAT , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for plaquette");
return globalPlaquette/(GK_totalVolume*GK_nColor*6);
}
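//- Illustrative helper (not part of the original file): the pattern above (copy the
//- per-block partial sums to the host, accumulate them, then MPI_Allreduce) is repeated
//- by several wrappers below. A generic sketch of the host-side accumulation could look
//- like this; the name sumPartialsOnHost and its use here are assumptions.
template<typename Float>
static Float sumPartialsOnHost(const Float* d_partial, int nBlocks){
Float *h_partial = (Float*) malloc(nBlocks * sizeof(Float));
if(h_partial == NULL) errorQuda("Error allocating host buffer for partial sums");
cudaMemcpy(h_partial, d_partial, nBlocks * sizeof(Float), cudaMemcpyDeviceToHost);
Float sum = 0;
for(int i = 0 ; i < nBlocks ; i++) sum += h_partial[i];
free(h_partial);
return sum;
}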
void quda::run_calculatePlaq_kernel(cudaTextureObject_t gaugeTexPlaq,
int precision){
if(precision == 4){
float plaq = calculatePlaq_kernel<float>(gaugeTexPlaq);
printfQuda("Calculated plaquette in single precision is %f\n",plaq);
}
else if(precision == 8){
double plaq = calculatePlaq_kernel<double>(gaugeTexPlaq);
printfQuda("Calculated plaquette in double precision is %lf\n",plaq);
}
else{
errorQuda("Precision not supported\n");
}
}
template<typename Float>
static void gaussianSmearing_kernel(void* out,
cudaTextureObject_t vecInTex,
cudaTextureObject_t gaugeTex){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
#ifdef TIMING_REPORT
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
#endif
if( typeid(Float) == typeid(float) )
gaussianSmearing_kernel_float<<<gridDim,blockDim>>>((float2*) out, vecInTex, gaugeTex);
else if(typeid(Float) == typeid(double))
gaussianSmearing_kernel_double<<<gridDim,blockDim>>>((double2*) out, vecInTex, gaugeTex);
checkCudaError();
#ifdef TIMING_REPORT
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printfQuda("Elapsed time for 1 step in gaussian smearing is %f ms\n",elapsedTime);
#endif
}
void quda::run_GaussianSmearing(void* out,
cudaTextureObject_t vecInTex,
cudaTextureObject_t gaugeTex,
int precision){
if(precision == 4){
gaussianSmearing_kernel<float>(out,vecInTex,gaugeTex);
}
else if(precision == 8){
gaussianSmearing_kernel<double>(out,vecInTex,gaugeTex);
}
else{
errorQuda("Precision not supported\n");
}
}
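//- Note (illustrative, not from the original source): a single Gaussian/Wuppertal smearing
//- step conventionally acts on a colour vector psi as
//- psi'(x) = ( psi(x) + alpha * sum_{mu=1,2,3} [ U_mu(x) psi(x+mu) + U_mu^dag(x-mu) psi(x-mu) ] ) / (1 + 6*alpha),
//- and run_GaussianSmearing above is expected to apply one such step per call; the value of
//- alpha and the exact normalisation live in the core header and are not visible here.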
void quda::run_UploadToCuda(void* in,ColorSpinorField &qudaVec, int precision, bool isEven){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if( qudaVec.SiteSubset() == QUDA_PARITY_SITE_SUBSET ){
if( isEven ){
if(precision == 4){
uploadToCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) in,(double2*) qudaVec.V(), NULL );
}
else if(precision == 8){
uploadToCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) in,(double2*) qudaVec.V(), NULL );
}
else{
errorQuda("Precision not supported\n");
}
}
else{
if(precision == 4){
uploadToCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) in, NULL,(double2*) qudaVec.V() );
}
else if(precision == 8){
uploadToCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) in, NULL,(double2*) qudaVec.V());
}
else{
errorQuda("Precision not supported\n");
}
}
}
else{
// printfQuda("### Uploading to QUDA both even and odd sites\n");
if(precision == 4){
uploadToCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) in,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else if(precision == 8){
uploadToCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) in,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else{
errorQuda("Precision not supported\n");
}
}
checkCudaError();
}
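//- Note (illustrative, not from the original source): QUDA stores fermion fields in
//- even/odd (checkerboard) site order, so the full-subset branch above scatters a
//- lexicographic vector into the Even()/Odd() halves. The helper below is introduced
//- here only to document that split and is not used by the original code.
static inline int siteParity(int x, int y, int z, int t){
return (x + y + z + t) & 1; // 0: even sublattice, 1: odd sublattice
}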
void quda::run_DownloadFromCuda(void* out,ColorSpinorField &qudaVec, int precision, bool isEven){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if( qudaVec.SiteSubset() == QUDA_PARITY_SITE_SUBSET ){
if( isEven ){
if(precision == 4){
downloadFromCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) out,(double2*) qudaVec.V(), NULL );
}
else if(precision == 8){
downloadFromCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) out,(double2*) qudaVec.V(), NULL );
}
else{
errorQuda("Precision not supported\n");
}
}
else{
if(precision == 4){
downloadFromCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) out, NULL,(double2*) qudaVec.V() );
}
else if(precision == 8){
downloadFromCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) out, NULL,(double2*) qudaVec.V());
}
else{
errorQuda("Precision not supported\n");
}
}
}
else{
// printfQuda("### Downloading from QUDA both even and odd sites\n");
if(precision == 4){
downloadFromCuda_kernel<float2><<<gridDim,blockDim>>>((float2*) out,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else if(precision == 8){
downloadFromCuda_kernel<double2><<<gridDim,blockDim>>>((double2*) out,(double2*) qudaVec.Even().V(), (double2*) qudaVec.Odd().V() );
}
else{
errorQuda("Precision not supported\n");
}
}
checkCudaError();
}
void quda::run_ScaleVector(double a, void* inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
scaleVector_kernel<float,float2><<<gridDim,blockDim>>>((float) a, (float2*) inOut);
}
else if(precision == 8){
scaleVector_kernel<double,double2><<<gridDim,blockDim>>>((double) a, (double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
template<typename Float2,typename Float>
static void contractMesons_kernel(cudaTextureObject_t texProp1,
cudaTextureObject_t texProp2,
Float (*corr)[2][10],
int it, int isource,
CORR_SPACE CorrSpace){
if( typeid(Float2) != typeid(float2) ) errorQuda("Unsupported precision for Meson 2pt Contraction kernels!\n");
int SpVol = GK_localVolume/GK_localL[3];
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (SpVol + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
long int alloc_size = blockDim.x * gridDim.x; // That's basically local spatial volume
h_partial_block = (Float*)malloc(alloc_size*2*10*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractMesons_kernel: Cannot allocate host block.\n");
cudaMalloc((void**)&d_partial_block, alloc_size*2*10*2*sizeof(Float));
checkCudaError();
contractMesons_kernel_PosSpace_float<<<gridDim,blockDim>>>((float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2]);
checkCudaError();
cudaMemcpy(h_partial_block , d_partial_block , alloc_size*2*10*2*sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
//-C.K. Copy host block into corr buffer
for(int pt = 0; pt < 2 ; pt++){
for( int mes = 0; mes < 10; mes++){
for(int sv = 0; sv < SpVol ; sv++){
corr[ 0 + 2*sv + 2*SpVol*it ][pt][mes] = h_partial_block[ 0 + 2*sv + 2*SpVol*mes + 2*SpVol*10*pt ];
corr[ 1 + 2*sv + 2*SpVol*it ][pt][mes] = h_partial_block[ 1 + 2*sv + 2*SpVol*mes + 2*SpVol*10*pt ];
}
}
}
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*2*10*gridDim.x*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("Error problem with allocation\n");
cudaMalloc((void**)&d_partial_block, GK_Nmoms*2*10*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*2*10*2,sizeof(Float));
contractMesons_kernel_float<<<gridDim,blockDim>>>((float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2]);
checkCudaError();
cudaMemcpy(h_partial_block , d_partial_block , GK_Nmoms*2*10*gridDim.x*2 * sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int ip = 0 ; ip < 10 ; ip++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*2*10*2 + iu*10*2 + ip*2 + 0] += h_partial_block[imom*2*10*gridDim.x*2 + iu*10*gridDim.x*2 + ip*gridDim.x*2 + i*2 + 0];
reduction[imom*2*10*2 + iu*10*2 + ip*2 + 1] += h_partial_block[imom*2*10*gridDim.x*2 + iu*10*gridDim.x*2 + ip*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int ip = 0 ; ip < 10 ; ip++){
corr[it*GK_Nmoms*2 + imom*2 + 0][iu][ip] = reduction[imom*2*10*2 + iu*10*2 + ip*2 + 0];
corr[it*GK_Nmoms*2 + imom*2 + 1][iu][ip] = reduction[imom*2*10*2 + iu*10*2 + ip*2 + 1];
}
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
free(reduction);
}//-CorrSpace else
else errorQuda("contractMesons_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
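//- Illustrative helper (not part of the original file): in momentum space the meson
//- correlator above is addressed as corr[it*GK_Nmoms*2 + imom*2 + reim][iu][ip], i.e. the
//- leading index runs over (timeslice, momentum, re/im) and the trailing ones over the 2
//- flavour combinations and 10 meson interpolators. The helper name below is an assumption,
//- shown only to document that layout.
static inline int mesonCorrRow(int it, int imom, int reim){
return it*GK_Nmoms*2 + imom*2 + reim;
}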
void quda::run_contractMesons(cudaTextureObject_t texProp1,
cudaTextureObject_t texProp2,
void* corr, int it, int isource,
int precision, CORR_SPACE CorrSpace){
if (CorrSpace==POSITION_SPACE) cudaFuncSetCacheConfig(contractMesons_kernel_PosSpace_float,cudaFuncCachePreferShared);
else if(CorrSpace==MOMENTUM_SPACE) cudaFuncSetCacheConfig(contractMesons_kernel_float ,cudaFuncCachePreferShared);
else errorQuda("run_contractMesons: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
checkCudaError();
if(precision == 4) contractMesons_kernel<float2,float>(texProp1,texProp2,(float(*)[2][10]) corr,it, isource, CorrSpace);
else if(precision == 8) errorQuda("Double precision in Meson 2pt Contractions unsupported!!!\n");
else errorQuda("run_contractMesons: Precision %d not supported\n",precision);
}
template<typename Float2,typename Float>
static void contractBaryons_kernel(cudaTextureObject_t texProp1,
cudaTextureObject_t texProp2,
Float (*corr)[2][10][4][4],
int it, int isource,
CORR_SPACE CorrSpace){
if( typeid(Float2) != typeid(float2) ) errorQuda("Unsupported precision for Baryon 2pt Contraction kernels!\n");
int SpVol = GK_localVolume/GK_localL[3];
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (SpVol + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
long int alloc_size = blockDim.x * gridDim.x; // That's basically local spatial volume
h_partial_block = (Float*)malloc(alloc_size*2*4*4*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractBaryons_kernel: Cannot allocate host block.\n");
cudaMalloc((void**)&d_partial_block, alloc_size*2*4*4*2*sizeof(Float));
checkCudaError();
for(int ip = 0 ; ip < 10 ; ip++){
contractBaryons_kernel_PosSpace_float<<<gridDim,blockDim>>>((float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2],ip);
checkCudaError();
cudaMemcpy(h_partial_block , d_partial_block , alloc_size*2*4*4*2*sizeof(Float) , cudaMemcpyDeviceToHost); //-C.K. Copy device block into host block
checkCudaError();
//-C.K. Copy host block into corr buffer
for(int pt = 0; pt < 2 ; pt++){
for(int ga = 0 ; ga < 4 ; ga++){
for(int gap = 0; gap < 4 ; gap++){
for(int sv = 0; sv < SpVol ; sv++){
corr[ 0 + 2*sv + 2*SpVol*it ][pt][ip][ga][gap] = h_partial_block[ 0 + 2*sv + 2*SpVol*gap + 2*SpVol*4*ga + 2*SpVol*4*4*pt ];
corr[ 1 + 2*sv + 2*SpVol*it ][pt][ip][ga][gap] = h_partial_block[ 1 + 2*sv + 2*SpVol*gap + 2*SpVol*4*ga + 2*SpVol*4*4*pt ];
}}}
}
}//-ip
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*2*4*4*gridDim.x*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("contractBaryons_kernel: Cannot allocate host block.\n");
cudaMalloc((void**)&d_partial_block, GK_Nmoms*2*4*4*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*2*4*4*2,sizeof(Float));
for(int ip = 0 ; ip < 10 ; ip++){
contractBaryons_kernel_float<<<gridDim,blockDim>>>((float2*) d_partial_block, texProp1, texProp2, it, GK_sourcePosition[isource][0] , GK_sourcePosition[isource][1], GK_sourcePosition[isource][2],ip);
checkCudaError();
cudaMemcpy(h_partial_block , d_partial_block , GK_Nmoms*2*4*4*gridDim.x*2 * sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*2*4*4*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gammap = 0 ; gammap < 4 ; gammap++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 0] += h_partial_block[imom*2*4*4*gridDim.x*2 + iu*4*4*gridDim.x*2 + gamma*4*gridDim.x*2 + gammap*gridDim.x*2 + i*2 + 0];
reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 1] += h_partial_block[imom*2*4*4*gridDim.x*2 + iu*4*4*gridDim.x*2 + gamma*4*gridDim.x*2 + gammap*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iu = 0 ; iu < 2 ; iu++)
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gammap = 0 ; gammap < 4 ; gammap++){
corr[it*GK_Nmoms*2 + imom*2 + 0][iu][ip][gamma][gammap] = reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 0];
corr[it*GK_Nmoms*2 + imom*2 + 1][iu][ip][gamma][gammap] = reduction[imom*2*4*4*2 + iu*4*4*2 + gamma*4*2 + gammap*2 + 1];
}
}//-ip
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
free(reduction);
}//-CorrSpace else
else errorQuda("contractBaryons_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
void quda::run_contractBaryons(cudaTextureObject_t texProp1,
cudaTextureObject_t texProp2,
void* corr, int it,
int isource, int precision,
CORR_SPACE CorrSpace){
if (CorrSpace==POSITION_SPACE) cudaFuncSetCacheConfig(contractBaryons_kernel_PosSpace_float,cudaFuncCachePreferShared);
else if(CorrSpace==MOMENTUM_SPACE) cudaFuncSetCacheConfig(contractBaryons_kernel_float ,cudaFuncCachePreferShared);
else errorQuda("run_contractBaryons: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
checkCudaError();
if(precision == 4) contractBaryons_kernel<float2,float>(texProp1,texProp2,(float(*)[2][10][4][4]) corr,it, isource, CorrSpace);
else if(precision == 8) errorQuda("Double precision in Baryon 2pt Contractions unsupported!!!\n");
else errorQuda("run_contractBaryons: Precision %d not supported\n",precision);
}
void quda::run_rotateToPhysicalBase(void* inOut, int sign, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
rotateToPhysicalBase_kernel<float2><<<gridDim,blockDim>>>((float2*) inOut,sign);
}
else if(precision == 8){
rotateToPhysicalBase_kernel<double2><<<gridDim,blockDim>>>((double2*) inOut,sign);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_castDoubleToFloat(void *out, void *in){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
castDoubleToFloat_kernel<<<gridDim,blockDim>>>((float2*) out, (double2*) in);
checkCudaError();
}
void quda::run_castFloatToDouble(void *out, void *in){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
castFloatToDouble_kernel<<<gridDim,blockDim>>>((double2*) out, (float2*) in);
checkCudaError();
}
void quda::run_conjugate_vector(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
conjugate_vector_kernel<float2><<<gridDim,blockDim>>>((float2*) inOut);
}
else if(precision == 8){
conjugate_vector_kernel<double2><<<gridDim,blockDim>>>((double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_apply_gamma5_vector(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
apply_gamma5_vector_kernel<float2><<<gridDim,blockDim>>>((float2*) inOut);
}
else if(precision == 8){
apply_gamma5_vector_kernel<double2><<<gridDim,blockDim>>>((double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_conjugate_propagator(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
conjugate_propagator_kernel<float2><<<gridDim,blockDim>>>((float2*) inOut);
}
else if(precision == 8){
conjugate_propagator_kernel<double2><<<gridDim,blockDim>>>((double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
void quda::run_apply_gamma5_propagator(void *inOut, int precision){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
if(precision == 4){
apply_gamma5_propagator_kernel<float2><<<gridDim,blockDim>>>((float2*) inOut);
}
else if(precision == 8){
apply_gamma5_propagator_kernel<double2><<<gridDim,blockDim>>>((double2*) inOut);
}
else{
errorQuda("Precision not supported\n");
}
checkCudaError();
}
template<typename Float>
static void seqSourceFixSinkPart1_kernel(void* out, int timeslice,
cudaTextureObject_t tex1,
cudaTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
if( typeid(Float) == typeid(float) )
seqSourceFixSinkPart1_kernel_float<<<gridDim,blockDim>>>((float2*) out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
else if(typeid(Float) == typeid(double))
seqSourceFixSinkPart1_kernel_double<<<gridDim,blockDim>>>((double2*) out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
checkCudaError();
}
template<typename Float>
static void seqSourceFixSinkPart2_kernel(void* out, int timeslice,
cudaTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE ){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
if( typeid(Float) == typeid(float) )
seqSourceFixSinkPart2_kernel_float<<<gridDim,blockDim>>>((float2*) out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
else if(typeid(Float) == typeid(double))
seqSourceFixSinkPart2_kernel_double<<<gridDim,blockDim>>>((double2*) out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
checkCudaError();
}
void quda::run_seqSourceFixSinkPart1(void* out, int timeslice,
cudaTextureObject_t tex1,
cudaTextureObject_t tex2,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE,
int precision){
if(precision == 4){
seqSourceFixSinkPart1_kernel<float>(out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
}
else if(precision == 8){
seqSourceFixSinkPart1_kernel<double>(out, timeslice, tex1, tex2, c_nu, c_c2, PID, PARTICLE);
}
else{
errorQuda("Precision not supported\n");
}
}
void quda::run_seqSourceFixSinkPart2(void* out, int timeslice,
cudaTextureObject_t tex,
int c_nu, int c_c2,
WHICHPROJECTOR PID,
WHICHPARTICLE PARTICLE,
int precision){
if(precision == 4){
seqSourceFixSinkPart2_kernel<float>(out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
}
else if(precision == 8){
seqSourceFixSinkPart2_kernel<double>(out, timeslice, tex, c_nu, c_c2, PID, PARTICLE);
}
else{
errorQuda("Precision not supported\n");
}
}
template<typename Float2,typename Float>
static void fixSinkContractions_kernel(void* corrThp_local,
void* corrThp_noether,
void* corrThp_oneD,
cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex,
cudaTextureObject_t gaugeTex,
WHICHPARTICLE PARTICLE,
int partflag, int itime,
int isource, CORR_SPACE CorrSpace){
int SpVol = GK_localVolume/GK_localL[3];
int lV = GK_localVolume;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (GK_localVolume/GK_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // spawn threads only for the spatial volume
Float *h_partial_block = NULL;
Float *d_partial_block = NULL;
if(CorrSpace==POSITION_SPACE){
size_t alloc_buf;
size_t copy_buf;
//- Ultra-local operators
alloc_buf = blockDim.x * gridDim.x * 16 * 2 * sizeof(Float);
copy_buf = SpVol * 16 * 2 * sizeof(Float);
cudaMalloc((void**)&d_partial_block, alloc_buf);
checkCudaError();
cudaMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_local_kernel_PosSpace_float<<<gridDim,blockDim>>> ((float2*) d_partial_block, fwdTex, seqTex, PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_local_kernel_PosSpace_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block into corrThp_local
cudaMemcpy(&(((Float*)corrThp_local)[2*16*SpVol*itime]) , d_partial_block , copy_buf , cudaMemcpyDeviceToHost);
checkCudaError();
//----------------------------------------------------------------------
//- One-derivative operators
for(int dir = 0 ; dir < 4 ; dir++){
cudaMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_oneD_kernel_PosSpace_float<<<gridDim,blockDim>>> ((float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_oneD_kernel_PosSpace_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block into corrThp_oneD for each dir
cudaMemcpy(&(((Float*)corrThp_oneD)[2*16*SpVol*itime + 2*16*lV*dir]), d_partial_block , copy_buf , cudaMemcpyDeviceToHost);
checkCudaError();
}//-dir
//----------------------------------------------------------------------
//- Noether, conserved current
//- it's better to reallocate the device block buffer here
cudaFree(d_partial_block);
checkCudaError();
d_partial_block = NULL;
alloc_buf = blockDim.x * gridDim.x * 4 * 2 * sizeof(Float);
copy_buf = SpVol * 4 * 2 * sizeof(Float);
cudaMalloc((void**)&d_partial_block, alloc_buf);
checkCudaError();
cudaMemset(d_partial_block, 0, alloc_buf);
checkCudaError();
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_noether_kernel_PosSpace_float<<<gridDim,blockDim>>> ((float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_noether_kernel_PosSpace_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
//-C.K. Copy device block to corrThp_noether
cudaMemcpy(&(((Float*)corrThp_noether)[2*4*SpVol*itime]) , d_partial_block , copy_buf , cudaMemcpyDeviceToHost);
checkCudaError();
cudaFree(d_partial_block);
checkCudaError();
}
else if(CorrSpace==MOMENTUM_SPACE){
h_partial_block = (Float*)malloc(GK_Nmoms*16*gridDim.x*2*sizeof(Float));
if(h_partial_block == NULL) errorQuda("fixSinkContractions_kernel: Cannot allocate host block.\n");
cudaMalloc((void**)&d_partial_block, GK_Nmoms*16*gridDim.x*2 * sizeof(Float) );
checkCudaError();
Float *reduction =(Float*) calloc(GK_Nmoms*16*2,sizeof(Float));
//- Ultra-local operators
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_local_kernel_float<<<gridDim,blockDim>>>((float2*) d_partial_block, fwdTex, seqTex, PARTICLE,
partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_local_kernel_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, PARTICLE,
partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
cudaMemcpy(h_partial_block , d_partial_block , GK_Nmoms*16*gridDim.x*2 * sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*16*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*16*2 + iop*2 + 0] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 0];
reduction[imom*16*2 + iop*2 + 1] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++){
((Float*) corrThp_local)[itime*GK_Nmoms*16*2 + imom*16*2 + iop*2 + 0] = reduction[imom*16*2 + iop*2 + 0];
((Float*) corrThp_local)[itime*GK_Nmoms*16*2 + imom*16*2 + iop*2 + 1] = reduction[imom*16*2 + iop*2 + 1];
}
//---------------------------------------------------------------
//- Noether, conserved current
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_noether_kernel_float<<<gridDim,blockDim>>>((float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_noether_kernel_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
cudaMemcpy(h_partial_block , d_partial_block , GK_Nmoms*4*gridDim.x*2 * sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*4*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int dir = 0 ; dir < 4 ; dir++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*4*2 + dir*2 + 0] += h_partial_block[imom*4*gridDim.x*2 + dir*gridDim.x*2 + i*2 + 0];
reduction[imom*4*2 + dir*2 + 1] += h_partial_block[imom*4*gridDim.x*2 + dir*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int dir = 0 ; dir < 4 ; dir++){
((Float*) corrThp_noether)[itime*GK_Nmoms*4*2 + imom*4*2 + dir*2 + 0] = reduction[imom*4*2 + dir*2 + 0];
((Float*) corrThp_noether)[itime*GK_Nmoms*4*2 + imom*4*2 + dir*2 + 1] = reduction[imom*4*2 + dir*2 + 1];
}
//---------------------------------------------------------------
//- One-derivative operators
for(int dir = 0 ; dir < 4 ; dir++){
if( typeid(Float2) == typeid(float2) )
fixSinkContractions_oneD_kernel_float<<<gridDim,blockDim>>>((float2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
else if( typeid(Float2) == typeid(double2) )
fixSinkContractions_oneD_kernel_double<<<gridDim,blockDim>>>((double2*) d_partial_block, fwdTex, seqTex, gaugeTex,
PARTICLE, partflag, itime, dir,
GK_sourcePosition[isource][0],
GK_sourcePosition[isource][1],
GK_sourcePosition[isource][2]);
cudaMemcpy(h_partial_block , d_partial_block , GK_Nmoms*16*gridDim.x*2 * sizeof(Float) , cudaMemcpyDeviceToHost);
checkCudaError();
memset(reduction,0,GK_Nmoms*16*2*sizeof(Float));
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[imom*16*2 + iop*2 + 0] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 0];
reduction[imom*16*2 + iop*2 + 1] += h_partial_block[imom*16*gridDim.x*2 + iop*gridDim.x*2 + i*2 + 1];
}
for(int imom = 0 ; imom < GK_Nmoms ; imom++)
for(int iop = 0 ; iop < 16 ; iop++){
((Float*) corrThp_oneD)[itime*GK_Nmoms*4*16*2 + imom*4*16*2 + dir*16*2 + iop*2 + 0] = reduction[imom*16*2 + iop*2 + 0];
((Float*) corrThp_oneD)[itime*GK_Nmoms*4*16*2 + imom*4*16*2 + dir*16*2 + iop*2 + 1] = reduction[imom*16*2 + iop*2 + 1];
}
}//-dir
//---------------------------------------------------------------
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
free(reduction);
}
else errorQuda("fixSinkContractions_kernel: Supports only POSITION_SPACE and MOMENTUM_SPACE!\n");
}
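//- Note (illustrative, not from the original source): the three-point buffers filled above
//- differ only in the number of operator components per site; the constants below are
//- introduced here just to document the sizes implied by the copies above.
static const int kUltraLocalOps = 16; // gamma insertions per site (ultra-local)
static const int kNoetherDirs = 4; // conserved-current directions
static const int kOneDerivDirs = 4; // derivative directions, each with 16 operators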
void quda::run_fixSinkContractions(void* corrThp_local, void* corrThp_noether,
void* corrThp_oneD, cudaTextureObject_t fwdTex,
cudaTextureObject_t seqTex, cudaTextureObject_t gaugeTex,
WHICHPARTICLE PARTICLE, int partflag, int it,
int isource, int precision, CORR_SPACE CorrSpace){
if(precision == 4)
fixSinkContractions_kernel<float2,float> (corrThp_local, corrThp_noether,
corrThp_oneD, fwdTex, seqTex,
gaugeTex, PARTICLE, partflag,
it, isource, CorrSpace);
else if(precision == 8)
fixSinkContractions_kernel<double2,double>(corrThp_local, corrThp_noether,
corrThp_oneD, fwdTex, seqTex,
gaugeTex, PARTICLE, partflag,
it, isource, CorrSpace);
else errorQuda("run_fixSinkContractions: Precision %d not supported\n",precision);
}
|
3b0417b312bab13f93e83161102fcacc508ec427.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<iostream>
#include"cuda.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<cmath>
#include<time.h>
#include <Windows.h>
#define N 106182300
using namespace std;
void add_with_cpu(double A[], int len) {
double ans = 0;
clock_t start, end;
start = clock();
for (int i = 0; i < len; i++) {
ans += A[i];
}
end = clock();
cout << "With cpu: " << "ans:" << ans << " " << "time:" << end-start << "ms" << endl;
}
__global__ static void add_with_all_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < len) {
atomicAdd(result, A[id]);
id += gridDim.x * blockDim.x;
}
}
__global__ static void add_with_few_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
while (id < len) {
temp += A[id];
id += gridDim.x * blockDim.x;
}
atomicAdd(result, temp);
}
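// Note (illustrative, not from the original source): with the <<<64, 64>>> launches used in
// main(), add_with_few_atomic issues one atomicAdd per thread, i.e. 64*64 = 4096 atomic
// additions in total, versus one atomicAdd per element (N = 106182300) in
// add_with_all_atomic above. Double-precision atomicAdd also relies on hardware support
// (compute capability >= 6.0 on NVIDIA GPUs).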
__global__ static void add_without_atomic(double *A, double *B, int len) {
extern __shared__ double cache[];
int id = threadIdx.x + blockIdx.x * blockDim.x;
double x = 0;
if (id < len) {
x = A[id];
}
cache[threadIdx.x] = x;
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) {
if (threadIdx.x < offset)
cache[threadIdx.x] += cache[threadIdx.x + offset];
__syncthreads();
}
if (threadIdx.x == 0) {
B[blockIdx.x] = cache[0]; // store this block's partial sum
}
}
int main() {
double *A = new double[N];
double result=0;
int len;
double *dev_A;
double *dev_result;
hipMalloc((void**)&dev_A, N * sizeof(double));
hipMalloc((void**)&dev_result, sizeof(double));
for (int i = 0; i < N; i++) {
A[i] = (double)(rand() % 101) / 101;
}
result = 0;
len = N;
hipMemcpy(dev_A, A, N * sizeof(double),
hipMemcpyHostToDevice);
hipEvent_t start, stop;
float elapsedTime;
// PART1 All atomic
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_with_all_atomic << <64, 64 >> > (dev_A, len, dev_result);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&result, dev_result, sizeof(double), hipMemcpyDeviceToHost);
cout << "With all atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//PART2 Few Atomic
double *dev_result1;
hipMalloc((void**)&dev_result1, sizeof(double));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_with_few_atomic << <64, 64 >> > (dev_A, len, dev_result1);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&result, dev_result1, sizeof(double), hipMemcpyDeviceToHost);
cout << "With few atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//part3
double *dev_result2;
hipMalloc((void**)&dev_result2, sizeof(double));
const int block_size = 512;
const int num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
double *partial_sums = 0;
hipMalloc((void**)&partial_sums, (num_blocks + 1) * sizeof(double)); // per-block partial sums plus one slot for the final total
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_without_atomic << <num_blocks, block_size, block_size * sizeof(double) >> > (dev_A, partial_sums, len);
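// NOTE: the second pass below reduces the per-block partial sums with a single thread
// block, which assumes num_blocks fits within one block's thread and shared-memory
// limits; for N as large as above a multi-pass reduction over ping-pong buffers would
// be needed.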
add_without_atomic << <1, num_blocks, num_blocks * sizeof(double) >> > (partial_sums, partial_sums + num_blocks, num_blocks);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&result, partial_sums + num_blocks, sizeof(double), hipMemcpyDeviceToHost);
cout << "Without atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
add_with_cpu(A, len);
} | 3b0417b312bab13f93e83161102fcacc508ec427.cu | #include<cstdio>
#include<iostream>
#include"cuda.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<cmath>
#include<time.h>
#include <Windows.h>
#define N 106182300
using namespace std;
void add_with_cpu(double A[], int len) {
double ans = 0;
clock_t start, end;
start = clock();
for (int i = 0; i < len; i++) {
ans += A[i];
}
end = clock();
cout << "With cpu: " << "ans:" << ans << " " << "time:" << end-start << "ms" << endl;
}
__global__ static void add_with_all_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < len) {
atomicAdd(result, A[id]);
id += gridDim.x * blockDim.x;
}
}
__global__ static void add_with_few_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
while (id < len) {
temp += A[id];
id += gridDim.x * blockDim.x;
}
atomicAdd(result, temp);
}
__global__ static void add_without_atomic(double *A, double *B, int len) {
extern __shared__ double cache[];
int id = threadIdx.x + blockIdx.x * blockDim.x;
double x = 0;
if (id < len) {
x = A[id];
}
cache[threadIdx.x] = x;
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) {
if (threadIdx.x < offset)
cache[threadIdx.x] += cache[threadIdx.x + offset];
__syncthreads();
}
if (threadIdx.x == 0) {
B[blockIdx.x] = cache[0]; // store this block's partial sum
}
}
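// Illustrative alternative (not part of the original benchmark): a warp-shuffle reduction
// avoids the shared-memory tree above and its power-of-two blockDim assumption. The names
// warpReduceSum/blockReduceSum are introduced here only as a sketch; blockDim.x is assumed
// to be a multiple of warpSize.
__inline__ __device__ double warpReduceSum(double val) {
	for (int offset = warpSize / 2; offset > 0; offset >>= 1)
		val += __shfl_down_sync(0xffffffff, val, offset);
	return val;
}
__inline__ __device__ double blockReduceSum(double val) {
	static __shared__ double warpSums[32]; // one partial per warp (up to 1024 threads)
	int lane = threadIdx.x % warpSize;
	int wid = threadIdx.x / warpSize;
	val = warpReduceSum(val); // reduce within each warp
	if (lane == 0) warpSums[wid] = val; // first lane of each warp publishes its partial
	__syncthreads();
	val = (threadIdx.x < blockDim.x / warpSize) ? warpSums[lane] : 0.0;
	if (wid == 0) val = warpReduceSum(val); // first warp reduces the per-warp partials
	return val; // thread 0 of the block holds the block sum
}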
int main() {
double *A = new double[N];
double result=0;
int len;
double *dev_A;
double *dev_result;
cudaMalloc((void**)&dev_A, N * sizeof(double));
cudaMalloc((void**)&dev_result, sizeof(double));
for (int i = 0; i < N; i++) {
A[i] = (double)(rand() % 101) / 101;
}
result = 0;
len = N;
cudaMemcpy(dev_A, A, N * sizeof(double),
cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float elapsedTime;
// PART1 All atomic
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_with_all_atomic << <64, 64 >> > (dev_A, len, dev_result);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&result, dev_result, sizeof(double), cudaMemcpyDeviceToHost);
cout << "With all atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//PART2 Few Atomic
double *dev_result1;
cudaMalloc((void**)&dev_result1, sizeof(double));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_with_few_atomic << <64, 64 >> > (dev_A, len, dev_result1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&result, dev_result1, sizeof(double), cudaMemcpyDeviceToHost);
cout << "With few atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//part3
double *dev_result2;
cudaMalloc((void**)&dev_result2, sizeof(double));
const int block_size = 512;
const int num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
double *partial_sums = 0;
cudaMalloc((void**)&partial_sums, (num_blocks + 1) * sizeof(double)); // per-block partial sums plus one slot for the final total
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_without_atomic << <num_blocks, block_size, block_size * sizeof(double) >> > (dev_A, partial_sums, len);
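// NOTE: the second pass below reduces the per-block partial sums with a single thread
// block, which assumes num_blocks fits within one block's thread and shared-memory
// limits; for N as large as above a multi-pass reduction over ping-pong buffers would
// be needed.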
add_without_atomic << <1, num_blocks, num_blocks * sizeof(double) >> > (partial_sums, partial_sums + num_blocks, num_blocks);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&result, partial_sums + num_blocks, sizeof(double), cudaMemcpyDeviceToHost);
cout << "Without atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
add_with_cpu(A, len);
} |
d77d5657667e77108506ec088a248d30cabc8bdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
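// Note (summarising the kernel above): the last class is the implicit reference with
// a_{K-1} = 0, and m = max(0, a_0, ..., a_{K-2}) is subtracted for numerical stability, so
// p_k = exp(a_k - m) / ( sum_{j<K-1} exp(a_j - m) + exp(-m) )
// der[k] = w * ( [k == target] - p_k ) for the K-1 independent approxes
// functionValue += w * ( a_target - m - log(sum) ) = w * log p_target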
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
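// Note (summarising the kernel above): with p_k as in the softmax above, the stored row of
// the per-sample second-derivative matrix (of -logL) is
// der2[row][row] = w * p_row * (1 - p_row)
// der2[row][k] = -w * p_row * p_k for k < row (lower triangle only)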
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
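// Note (summarising the kernel above): each class is treated as an independent binary
// logistic problem with p = sigma(a) = e^a / (1 + e^a) (clipped), so
// der[clazz] = w * ( [clazz == target] - p )
// functionValue += w * ( c*a - log(1 + e^a) ) / numClasses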
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
bool isBinClass,
ui32* bins) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 targetClass = static_cast<ui8>(__ldg(targetClasses + i));
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
if (isBinClass) {
bestClass = __ldg(predictions) > 0;
} else {
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
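// Note (illustrative, not from the original source): each bin encodes a (predicted, true)
// pair as bestClass * numClasses + targetClass, so a histogram of 'bins' with
// numClasses*numClasses entries is the confusion matrix; a bin value b unpacks as
// predicted = b / numClasses, true_label = b % numClasses.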
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
bool isBinClass,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks) {
BuildConfusionMatrixBinsImpl << < numBlocks, blockSize, 0, stream >> >(targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, isBinClass, bins);
}
}
}
| d77d5657667e77108506ec088a248d30cabc8bdf.cu | #include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
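// Note: the kernel above fills the columns k <= der2Row of one row of the per-sample matrix
// weight * (diag(p) - p * p^T), where p is the softmax over the numClasses - 1 stored approxes plus the
// implicit zero-approx last class: der2[k] = -weight * p_k * p_row for k < der2Row and
// der2[der2Row] = weight * p_row * (1 - p_row).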
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
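// Usage sketch (hypothetical device pointers and sizes, shown only to illustrate the signature above):
//   MultiLogitValueAndDer(targetsDev, /*numClasses*/ 4, weightsDev, /*size*/ docCount,
//                         approxDev, /*predictionsAlignSize*/ alignedDocCount,
//                         /*loadPredictionsIndices*/ nullptr,
//                         valueDev, derDev, /*derAlignSize*/ alignedDocCount, stream);
// functionValue is zeroed with FillBuffer before the kernel accumulates into it via atomicAdd.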
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
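// Note: unlike the multinomial kernel above, this treats every class as an independent binary logistic
// problem: p = sigmoid(approx) clipped by ClipProb, der = weight * (c - p) with c = 1 only for the target
// class, and the value accumulates weight * (c * approx - log(1 + exp(approx))) / numClasses.
// The isfinite(expVal) check falls back to the asymptote log(1 + exp(a)) ~= a when __expf(a) overflows.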
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
bool isBinClass,
ui32* bins) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 targetClass = static_cast<ui8>(__ldg(targetClasses + i));
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
if (isBinClass) {
bestClass = __ldg(predictions) > 0;
} else {
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
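// Note: each sample is encoded as bins[i] = bestClass * numClasses + targetClass, so counting the
// occurrences of every bin value yields the numClasses x numClasses confusion matrix. A host-side decode
// (illustrative) would be:
//   const int predictedClass = bin / numClasses;
//   const int targetClass = bin % numClasses;
// In the binary case (isBinClass) the predicted class is simply (prediction > 0).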
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
bool isBinClass,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks) {
BuildConfusionMatrixBinsImpl << < numBlocks, blockSize, 0, stream >> >(targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, isBinClass, bins);
}
}
}
|
0c96aadf4eb7e7f25b1985fa6972673ffde654ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <cassert>
#include <hip/hip_runtime.h>
#include <math.h>
#include "gpu_utils.h"
#include "cuda_utils.h"
#include "CudaPMEDirectForceBlock.h"
#include "CudaDirectForceKernels.h"
//
// Merge results from calc_force
//
__global__ void mergeNonbondResultsKernel(const int numBlock,
const long long int* __restrict__ biflam_in,
const long long int* __restrict__ biflam2_in,
double* __restrict__ biflam,
double* __restrict__ biflam2) {
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < numBlock) {
long long int val1 = biflam_in[tid];
long long int val2 = biflam2_in[tid];
atomicAdd(&biflam[tid], ((double)val1)*INV_FORCE_SCALE_VIR);
atomicAdd(&biflam2[tid], ((double)val2)*INV_FORCE_SCALE_VIR);
}
}
//
// merge results from calc_14_force
//
__global__ void merge14ResultsKernel(const int m,
const int* __restrict__ lowTriangleIJ,
const float* __restrict__ blockParam,
const int* __restrict__ siteMLD,
const float* __restrict__ bixlam,
const double* __restrict__ energyVdw14Block,
const double* __restrict__ energyElec14Block,
const double* __restrict__ energyExcl14Block,
double* __restrict__ biflam,
double* __restrict__ biflam2,
double* __restrict__ energyVdw,
double* __restrict__ energyElec,
double* __restrict__ energyExcl) {
// Shared memory required: min(m, blockDim.x)*3*sizeof(double)
extern __shared__ double sh_energyBuf[];
// Limit for shared memory access
const int shlim = min(m, blockDim.x);
volatile double* sh_energyVdw = &sh_energyBuf[0];
volatile double* sh_energyElec = &sh_energyBuf[shlim];
volatile double* sh_energyExcl = &sh_energyBuf[shlim*2];
const int k = threadIdx.x + blockIdx.x*blockDim.x;
if (threadIdx.x < shlim) {
sh_energyVdw[threadIdx.x] = 0.0;
sh_energyElec[threadIdx.x] = 0.0;
sh_energyExcl[threadIdx.x] = 0.0;
}
if (k < m) {
// lower triangle indices ib and jb could be calculated from: ceil((ceil(sqrt(1+8*k))-1)/2)-1;
// However, I'm worried about rounding errors so I'll use pre-calculated table here
// lowTriangleIbJB = (jb << 16) | ib
int ib = lowTriangleIJ[k];
int jb = ib;
ib &= 0xffff;
jb >>= 16;
float fscale = blockParam[k];
double energyVdw14BlockVal = energyVdw14Block[k];
double energyElec14BlockVal = energyElec14Block[k];
if (fscale != 1.0f && fscale > 0.0f) {
int ib_site = siteMLD[ib];
int jb_site = siteMLD[jb];
int ibb = (ib == jb) ? ib : ( ib == 0 ? jb : (jb == 0 ? ib : -1) );
double energyTot = energyVdw14BlockVal + energyElec14BlockVal;
if (ibb >= 0) {
atomicAdd(&biflam[ibb], energyTot);
} else if (ib_site != jb_site) {
atomicAdd(&biflam2[ib], ((double)bixlam[ib])*energyTot);
atomicAdd(&biflam2[jb], ((double)bixlam[jb])*energyTot);
}
}
//if (fscale /= one .and. fscale > zero) then
// call msld_lambdaforce(ibl, jbl, vdwpot_block, biflam_loc, biflam2_loc)
// call msld_lambdaforce(ibl, jbl, coulpot_block, biflam_loc, biflam2_loc)
//endif
/*
subroutine msld_lambdaforce(ibl,jbl,energy,biflam_loc,biflam2_loc)
integer, intent(in) :: ibl, jbl
real(chm_real), intent(in) :: energy
real(chm_real), intent(inout), optional :: biflam_loc(:), biflam2_loc(:)
if (present(biflam_loc) .and. present(biflam2_loc)) then
if (ibl.eq.jbl) then
biflam_loc(ibl) = biflam_loc(ibl) + energy
elseif (ibl.eq.1) then
biflam_loc(jbl) = biflam_loc(jbl) + energy
elseif (jbl.eq.1) then
biflam_loc(ibl) = biflam_loc(ibl) + energy
elseif (isitemld(ibl).ne.isitemld(jbl)) then
biflam2_loc(jbl) = biflam2_loc(jbl) + bixlam(ibl)*energy
biflam2_loc(ibl) = biflam2_loc(ibl) + bixlam(jbl)*energy
endif
*/
// Store energy into shared memory
double fscaled = (double)fscale;
sh_energyVdw[threadIdx.x] = fscaled*energyVdw14BlockVal;
sh_energyElec[threadIdx.x] = fscaled*energyElec14BlockVal;
sh_energyExcl[threadIdx.x] = fscaled*energyExcl14Block[k];
}
// Reduce energies within thread block
__syncthreads();
for (int d=1;d < shlim;d *= 2) {
int pos = threadIdx.x + d;
double energyVdw_val = (pos < shlim) ? sh_energyVdw[pos] : 0.0;
double energyElec_val = (pos < shlim) ? sh_energyElec[pos] : 0.0;
double energyExcl_val = (pos < shlim) ? sh_energyExcl[pos] : 0.0;
__syncthreads();
if (threadIdx.x < shlim) {
sh_energyVdw[threadIdx.x] += energyVdw_val;
sh_energyElec[threadIdx.x] += energyElec_val;
sh_energyExcl[threadIdx.x] += energyExcl_val;
}
__syncthreads();
}
// Write to global memory
if (threadIdx.x == 0) {
atomicAdd(energyVdw, sh_energyVdw[0]);
atomicAdd(energyElec, sh_energyElec[0]);
atomicAdd(energyExcl, sh_energyExcl[0]);
}
}
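// Note: lowTriangleIJ entries pack a block pair as (jb << 16) | ib (built in the constructor below).
// Minimal helpers, illustrative only and not used elsewhere in this file, showing the encode/decode:
inline int packLowTrianglePair(int ib, int jb) { return (jb << 16) | ib; }
inline void unpackLowTrianglePair(int v, int& ib, int& jb) { ib = v & 0xffff; jb = v >> 16; }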
//########################################################################################
//########################################################################################
//########################################################################################
//
// Class creator
//
template <typename AT, typename CT>
CudaPMEDirectForceBlock<AT, CT>::CudaPMEDirectForceBlock(CudaEnergyVirial &energyVirial,
const char *nameVdw, const char *nameElec, const char *nameExcl,
CudaBlock &cudaBlock) :
CudaPMEDirectForce<AT,CT>(energyVirial, nameVdw, nameElec, nameExcl), cudaBlock(cudaBlock) {
biflamLen = 0;
biflam = NULL;
biflam2Len = 0;
biflam2 = NULL;
energy14BlockBuffer = NULL;
h_in14TblBlockPos = NULL;
h_ex14TblBlockPos = NULL;
// lowTriangleIbJB = (jb << 16) | ib
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
int *h_lowTriangleIJ = new int[m];
int k = 0;
for (int ib=0;ib < cudaBlock.getNumBlock();ib++) {
for (int jb=0;jb <= ib;jb++) {
h_lowTriangleIJ[k] = (jb << 16) | ib;
k++;
}
}
allocate<int>(&lowTriangleIJ, m);
copy_HtoD_sync<int>(h_lowTriangleIJ, lowTriangleIJ, m);
delete [] h_lowTriangleIJ;
}
//
// Class destructor
//
template <typename AT, typename CT>
CudaPMEDirectForceBlock<AT, CT>::~CudaPMEDirectForceBlock() {
if (biflam != NULL) deallocate<AT>(&biflam);
if (biflam2 != NULL) deallocate<AT>(&biflam2);
if (energy14BlockBuffer != NULL) deallocate<double>(&energy14BlockBuffer);
if (h_in14TblBlockPos != NULL) delete [] h_in14TblBlockPos;
if (h_ex14TblBlockPos != NULL) delete [] h_ex14TblBlockPos;
deallocate<int>(&lowTriangleIJ);
}
//
// Set values for 1-4 block position tables
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::set14BlockPos(int *h_in14TblBlockPos_in, int *h_ex14TblBlockPos_in) {
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
if (h_in14TblBlockPos == NULL) h_in14TblBlockPos = new int[m+1];
if (h_ex14TblBlockPos == NULL) h_ex14TblBlockPos = new int[m+1];
for (int i=0;i < m+1;i++) {
h_in14TblBlockPos[i] = h_in14TblBlockPos_in[i];
h_ex14TblBlockPos[i] = h_ex14TblBlockPos_in[i];
}
}
//
// Calculates 1-4 exclusions and interactions
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::calc_14_force(const float4 *xyzq,
const bool calc_energy, const bool calc_virial,
const int stride, AT *force, hipStream_t stream) {
if (this->use_tex_vdwparam14) {
#ifdef USE_TEXTURE_OBJECTS
if (!this->vdwParam14TexObjActive) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_14_force, vdwParam14TexObj must be created" << std::endl;
exit(1);
}
#else
if (!get_vdwparam14_texref_bound()) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_14_force, vdwparam14_texref must be bound" << std::endl;
exit(1);
}
#endif
}
int nthread = 512;
int shmem_size = 0;
if (calc_energy) {
shmem_size = nthread*sizeof(double2);
}
int vdw_model_loc = this->calc_vdw ? this->vdw_model : NONE;
int elec_model_loc = this->calc_elec ? this->elec_model : NONE;
if (elec_model_loc == NONE && vdw_model_loc == NONE) return;
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
if (calc_energy) {
if (energy14BlockBuffer == NULL) {
allocate<double>(&energy14BlockBuffer, m*3);
energyVdw14Block = &energy14BlockBuffer[0];
energyElec14Block = &energy14BlockBuffer[m];
energyExcl14Block = &energy14BlockBuffer[m*2];
}
clear_gpu_array<double>(energy14BlockBuffer, m*3, stream);
}
for (int k=0;k < m;k++) {
float fscale = cudaBlock.getBlockParamValue(k);
int pos_in14 = h_in14TblBlockPos[k];
int num_in14 = h_in14TblBlockPos[k+1] - h_in14TblBlockPos[k];
int pos_ex14 = h_ex14TblBlockPos[k];
int num_ex14 = h_ex14TblBlockPos[k+1] - h_ex14TblBlockPos[k];
int nin14block = (num_in14 - 1)/nthread + 1;
int nex14block = (num_ex14 - 1)/nthread + 1;
int nblock = nin14block + nex14block;
calcForce14KernelChoice<AT,CT>(nblock, nthread, shmem_size, stream,
vdw_model_loc, elec_model_loc, calc_energy, calc_virial,
num_in14, &this->in14list[pos_in14],
num_ex14, &this->ex14list[pos_ex14],
nin14block, this->vdwtype, this->vdwparam14,
#ifdef USE_TEXTURE_OBJECTS
this->vdwParam14TexObj,
#endif
xyzq, fscale, stride, force,
this->energyVirial.getVirialPointer(),
&energyVdw14Block[k], &energyElec14Block[k],
&energyExcl14Block[k]);
}
if (calc_energy) {
nthread = min( ((m-1)/warpsize+1)*warpsize, get_max_nthread());
shmem_size = min(m, nthread)*3*sizeof(double);
// Check if we want too much shared memory (this should not happen)
if (shmem_size > get_max_shmem_size()) {
std::cout << "CudaPMEDirectForceBlock::calc_14_force, amount of shared memory exceeded" << std::endl;
exit(1);
}
int nblock = (m - 1)/nthread + 1;
hipLaunchKernelGGL(( merge14ResultsKernel), dim3(nblock), dim3(nthread), shmem_size, stream ,
m, lowTriangleIJ, cudaBlock.getBlockParam(), cudaBlock.getSiteMLD(), cudaBlock.getBixlam(),
energyVdw14Block, energyElec14Block, energyExcl14Block,
cudaBlock.getBiflam(), cudaBlock.getBiflam2(),
this->energyVirial.getEnergyPointer(this->strVdw),
this->energyVirial.getEnergyPointer(this->strElec),
this->energyVirial.getEnergyPointer(this->strExcl));
cudaCheck(hipGetLastError());
}
}
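// Note: energy14BlockBuffer holds m*3 doubles laid out as three consecutive length-m arrays:
// [0, m) per-pair vdw energies, [m, 2m) per-pair elec energies and [2m, 3m) per-pair exclusion energies,
// one slot per block pair k. merge14ResultsKernel then applies each pair's blockParam scaling and reduces
// them into the global energy accumulators and the biflam/biflam2 lambda-force arrays.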
//
// Calculates direct force
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::calc_force(const float4 *xyzq,
const CudaNeighborListBuild<32>& nlist,
const bool calc_energy,
const bool calc_virial,
const int stride, AT *force,
hipStream_t stream) {
#ifdef USE_TEXTURE_OBJECTS
if (!this->vdwParamTexObjActive) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, vdwParamTexObj must be created" << std::endl;
exit(1);
}
#else
if (!get_vdwparam_texref_bound()) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, vdwparam_texref must be bound"
<< std::endl;
exit(1);
}
#endif
#ifndef NUMBLOCK_LARGE
if (cudaBlock.getNumBlock() > 512) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, numBlock > 512 is not currently allowed" << std::endl;
exit(1);
}
#endif
// Re-allocate biflam and biflam2
reallocate<AT>(&biflam, &biflamLen, cudaBlock.getNumBlock());
reallocate<AT>(&biflam2, &biflam2Len, cudaBlock.getNumBlock());
// Clear biflam and biflam2
clear_gpu_array<AT>(biflam, cudaBlock.getNumBlock(), stream);
clear_gpu_array<AT>(biflam2, cudaBlock.getNumBlock(), stream);
if (nlist.get_n_ientry() == 0) return;
int vdw_model_loc = this->calc_vdw ? this->vdw_model : NONE;
int elec_model_loc = this->calc_elec ? this->elec_model : NONE;
if (elec_model_loc == NONE && vdw_model_loc == NONE) return;
int nwarp = 2;
if (get_cuda_arch() < 300) {
nwarp = 2;
} else {
nwarp = 4;
}
int nthread = warpsize*nwarp;
int nblock_tot = (nlist.get_n_ientry()-1)/(nthread/warpsize)+1;
int shmem_size = 0;
// (sh_xi, sh_yi, sh_zi, sh_qi, sh_vdwtypei, sh_blocktypei)
if (get_cuda_arch() < 300)
shmem_size += (nthread/warpsize)*tilesize*(sizeof(float)*4 + sizeof(int) + sizeof(int));
#ifndef NUMBLOCK_LARGE
shmem_size += cudaBlock.getNumBlock()*sizeof(float);
#endif
// (sh_fix, sh_fiy, sh_fiz)
shmem_size += (nthread/warpsize)*warpsize*sizeof(AT)*3;
// If no texture fetch for vdwparam:
//shmem_size += nvdwparam*sizeof(float);
if (calc_energy) shmem_size = max(shmem_size, (int)(nthread*sizeof(double)*2));
if (calc_virial) shmem_size = max(shmem_size, (int)(nthread*sizeof(double)*3));
calcForceKernelChoice<AT,CT>(nblock_tot, nthread, shmem_size, stream,
vdw_model_loc, elec_model_loc, calc_energy, calc_virial,
nlist, this->vdwparam, this->nvdwparam, this->vdwtype,
#ifdef USE_TEXTURE_OBJECTS
this->vdwParamTexObj,
#endif
xyzq, stride, force,
this->energyVirial.getVirialPointer(),
this->energyVirial.getEnergyPointer(this->strVdw),
this->energyVirial.getEnergyPointer(this->strElec),
&cudaBlock, this->biflam, this->biflam2);
// Convert biflam and biflam2 into double precision and add to cudaBlock.biflam -arrays
if (calc_energy) {
hipLaunchKernelGGL(( mergeNonbondResultsKernel), dim3((cudaBlock.getNumBlock()-1)/64+1), dim3(64), 0, stream ,
cudaBlock.getNumBlock(), biflam, biflam2, cudaBlock.getBiflam(), cudaBlock.getBiflam2());
cudaCheck(hipGetLastError());
}
}
//
// Explicit instances of CudaPMEDirectForceBlock
//
template class CudaPMEDirectForceBlock<long long int, float>;
| 0c96aadf4eb7e7f25b1985fa6972673ffde654ec.cu | #include <iostream>
#include <fstream>
#include <cassert>
#include <cuda.h>
#include <math.h>
#include "gpu_utils.h"
#include "cuda_utils.h"
#include "CudaPMEDirectForceBlock.h"
#include "CudaDirectForceKernels.h"
//
// Merge results from calc_force
//
__global__ void mergeNonbondResultsKernel(const int numBlock,
const long long int* __restrict__ biflam_in,
const long long int* __restrict__ biflam2_in,
double* __restrict__ biflam,
double* __restrict__ biflam2) {
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < numBlock) {
long long int val1 = biflam_in[tid];
long long int val2 = biflam2_in[tid];
atomicAdd(&biflam[tid], ((double)val1)*INV_FORCE_SCALE_VIR);
atomicAdd(&biflam2[tid], ((double)val2)*INV_FORCE_SCALE_VIR);
}
}
//
// merge results from calc_14_force
//
__global__ void merge14ResultsKernel(const int m,
const int* __restrict__ lowTriangleIJ,
const float* __restrict__ blockParam,
const int* __restrict__ siteMLD,
const float* __restrict__ bixlam,
const double* __restrict__ energyVdw14Block,
const double* __restrict__ energyElec14Block,
const double* __restrict__ energyExcl14Block,
double* __restrict__ biflam,
double* __restrict__ biflam2,
double* __restrict__ energyVdw,
double* __restrict__ energyElec,
double* __restrict__ energyExcl) {
// Shared memory required: min(m, blockDim.x)*3*sizeof(double)
extern __shared__ double sh_energyBuf[];
// Limit for shared memory access
const int shlim = min(m, blockDim.x);
volatile double* sh_energyVdw = &sh_energyBuf[0];
volatile double* sh_energyElec = &sh_energyBuf[shlim];
volatile double* sh_energyExcl = &sh_energyBuf[shlim*2];
const int k = threadIdx.x + blockIdx.x*blockDim.x;
if (threadIdx.x < shlim) {
sh_energyVdw[threadIdx.x] = 0.0;
sh_energyElec[threadIdx.x] = 0.0;
sh_energyExcl[threadIdx.x] = 0.0;
}
if (k < m) {
// lower triangle indices ib and jb could be calculated from: ceil((ceil(sqrt(1+8*k))-1)/2)-1;
// However, I'm worried about rounding errors so I'll use pre-calculated table here
// lowTriangleIbJB = (jb << 16) | ib
int ib = lowTriangleIJ[k];
int jb = ib;
ib &= 0xffff;
jb >>= 16;
float fscale = blockParam[k];
double energyVdw14BlockVal = energyVdw14Block[k];
double energyElec14BlockVal = energyElec14Block[k];
if (fscale != 1.0f && fscale > 0.0f) {
int ib_site = siteMLD[ib];
int jb_site = siteMLD[jb];
int ibb = (ib == jb) ? ib : ( ib == 0 ? jb : (jb == 0 ? ib : -1) );
double energyTot = energyVdw14BlockVal + energyElec14BlockVal;
if (ibb >= 0) {
atomicAdd(&biflam[ibb], energyTot);
} else if (ib_site != jb_site) {
atomicAdd(&biflam2[ib], ((double)bixlam[ib])*energyTot);
atomicAdd(&biflam2[jb], ((double)bixlam[jb])*energyTot);
}
}
//if (fscale /= one .and. fscale > zero) then
// call msld_lambdaforce(ibl, jbl, vdwpot_block, biflam_loc, biflam2_loc)
// call msld_lambdaforce(ibl, jbl, coulpot_block, biflam_loc, biflam2_loc)
//endif
/*
subroutine msld_lambdaforce(ibl,jbl,energy,biflam_loc,biflam2_loc)
integer, intent(in) :: ibl, jbl
real(chm_real), intent(in) :: energy
real(chm_real), intent(inout), optional :: biflam_loc(:), biflam2_loc(:)
if (present(biflam_loc) .and. present(biflam2_loc)) then
if (ibl.eq.jbl) then
biflam_loc(ibl) = biflam_loc(ibl) + energy
elseif (ibl.eq.1) then
biflam_loc(jbl) = biflam_loc(jbl) + energy
elseif (jbl.eq.1) then
biflam_loc(ibl) = biflam_loc(ibl) + energy
elseif (isitemld(ibl).ne.isitemld(jbl)) then
biflam2_loc(jbl) = biflam2_loc(jbl) + bixlam(ibl)*energy
biflam2_loc(ibl) = biflam2_loc(ibl) + bixlam(jbl)*energy
endif
*/
// Store energy into shared memory
double fscaled = (double)fscale;
sh_energyVdw[threadIdx.x] = fscaled*energyVdw14BlockVal;
sh_energyElec[threadIdx.x] = fscaled*energyElec14BlockVal;
sh_energyExcl[threadIdx.x] = fscaled*energyExcl14Block[k];
}
// Reduce energies within thread block
__syncthreads();
for (int d=1;d < shlim;d *= 2) {
int pos = threadIdx.x + d;
double energyVdw_val = (pos < shlim) ? sh_energyVdw[pos] : 0.0;
double energyElec_val = (pos < shlim) ? sh_energyElec[pos] : 0.0;
double energyExcl_val = (pos < shlim) ? sh_energyExcl[pos] : 0.0;
__syncthreads();
if (threadIdx.x < shlim) {
sh_energyVdw[threadIdx.x] += energyVdw_val;
sh_energyElec[threadIdx.x] += energyElec_val;
sh_energyExcl[threadIdx.x] += energyExcl_val;
}
__syncthreads();
}
// Write to global memory
if (threadIdx.x == 0) {
atomicAdd(energyVdw, sh_energyVdw[0]);
atomicAdd(energyElec, sh_energyElec[0]);
atomicAdd(energyExcl, sh_energyExcl[0]);
}
}
//########################################################################################
//########################################################################################
//########################################################################################
//
// Class creator
//
template <typename AT, typename CT>
CudaPMEDirectForceBlock<AT, CT>::CudaPMEDirectForceBlock(CudaEnergyVirial &energyVirial,
const char *nameVdw, const char *nameElec, const char *nameExcl,
CudaBlock &cudaBlock) :
CudaPMEDirectForce<AT,CT>(energyVirial, nameVdw, nameElec, nameExcl), cudaBlock(cudaBlock) {
biflamLen = 0;
biflam = NULL;
biflam2Len = 0;
biflam2 = NULL;
energy14BlockBuffer = NULL;
h_in14TblBlockPos = NULL;
h_ex14TblBlockPos = NULL;
// lowTriangleIbJB = (jb << 16) | ib
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
int *h_lowTriangleIJ = new int[m];
int k = 0;
for (int ib=0;ib < cudaBlock.getNumBlock();ib++) {
for (int jb=0;jb <= ib;jb++) {
h_lowTriangleIJ[k] = (jb << 16) | ib;
k++;
}
}
allocate<int>(&lowTriangleIJ, m);
copy_HtoD_sync<int>(h_lowTriangleIJ, lowTriangleIJ, m);
delete [] h_lowTriangleIJ;
}
//
// Class destructor
//
template <typename AT, typename CT>
CudaPMEDirectForceBlock<AT, CT>::~CudaPMEDirectForceBlock() {
if (biflam != NULL) deallocate<AT>(&biflam);
if (biflam2 != NULL) deallocate<AT>(&biflam2);
if (energy14BlockBuffer != NULL) deallocate<double>(&energy14BlockBuffer);
if (h_in14TblBlockPos != NULL) delete [] h_in14TblBlockPos;
if (h_ex14TblBlockPos != NULL) delete [] h_ex14TblBlockPos;
deallocate<int>(&lowTriangleIJ);
}
//
// Set values for 1-4 block position tables
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::set14BlockPos(int *h_in14TblBlockPos_in, int *h_ex14TblBlockPos_in) {
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
if (h_in14TblBlockPos == NULL) h_in14TblBlockPos = new int[m+1];
if (h_ex14TblBlockPos == NULL) h_ex14TblBlockPos = new int[m+1];
for (int i=0;i < m+1;i++) {
h_in14TblBlockPos[i] = h_in14TblBlockPos_in[i];
h_ex14TblBlockPos[i] = h_ex14TblBlockPos_in[i];
}
}
//
// Calculates 1-4 exclusions and interactions
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::calc_14_force(const float4 *xyzq,
const bool calc_energy, const bool calc_virial,
const int stride, AT *force, cudaStream_t stream) {
if (this->use_tex_vdwparam14) {
#ifdef USE_TEXTURE_OBJECTS
if (!this->vdwParam14TexObjActive) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_14_force, vdwParam14TexObj must be created" << std::endl;
exit(1);
}
#else
if (!get_vdwparam14_texref_bound()) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_14_force, vdwparam14_texref must be bound" << std::endl;
exit(1);
}
#endif
}
int nthread = 512;
int shmem_size = 0;
if (calc_energy) {
shmem_size = nthread*sizeof(double2);
}
int vdw_model_loc = this->calc_vdw ? this->vdw_model : NONE;
int elec_model_loc = this->calc_elec ? this->elec_model : NONE;
if (elec_model_loc == NONE && vdw_model_loc == NONE) return;
int m = cudaBlock.getNumBlock()*(cudaBlock.getNumBlock()+1)/2;
if (calc_energy) {
if (energy14BlockBuffer == NULL) {
allocate<double>(&energy14BlockBuffer, m*3);
energyVdw14Block = &energy14BlockBuffer[0];
energyElec14Block = &energy14BlockBuffer[m];
energyExcl14Block = &energy14BlockBuffer[m*2];
}
clear_gpu_array<double>(energy14BlockBuffer, m*3, stream);
}
for (int k=0;k < m;k++) {
float fscale = cudaBlock.getBlockParamValue(k);
int pos_in14 = h_in14TblBlockPos[k];
int num_in14 = h_in14TblBlockPos[k+1] - h_in14TblBlockPos[k];
int pos_ex14 = h_ex14TblBlockPos[k];
int num_ex14 = h_ex14TblBlockPos[k+1] - h_ex14TblBlockPos[k];
int nin14block = (num_in14 - 1)/nthread + 1;
int nex14block = (num_ex14 - 1)/nthread + 1;
int nblock = nin14block + nex14block;
calcForce14KernelChoice<AT,CT>(nblock, nthread, shmem_size, stream,
vdw_model_loc, elec_model_loc, calc_energy, calc_virial,
num_in14, &this->in14list[pos_in14],
num_ex14, &this->ex14list[pos_ex14],
nin14block, this->vdwtype, this->vdwparam14,
#ifdef USE_TEXTURE_OBJECTS
this->vdwParam14TexObj,
#endif
xyzq, fscale, stride, force,
this->energyVirial.getVirialPointer(),
&energyVdw14Block[k], &energyElec14Block[k],
&energyExcl14Block[k]);
}
if (calc_energy) {
nthread = min( ((m-1)/warpsize+1)*warpsize, get_max_nthread());
shmem_size = min(m, nthread)*3*sizeof(double);
// Check if we want too much shared memory (this should not happen)
if (shmem_size > get_max_shmem_size()) {
std::cout << "CudaPMEDirectForceBlock::calc_14_force, amount of shared memory exceeded" << std::endl;
exit(1);
}
int nblock = (m - 1)/nthread + 1;
merge14ResultsKernel<<< nblock, nthread, shmem_size, stream >>>
(m, lowTriangleIJ, cudaBlock.getBlockParam(), cudaBlock.getSiteMLD(), cudaBlock.getBixlam(),
energyVdw14Block, energyElec14Block, energyExcl14Block,
cudaBlock.getBiflam(), cudaBlock.getBiflam2(),
this->energyVirial.getEnergyPointer(this->strVdw),
this->energyVirial.getEnergyPointer(this->strElec),
this->energyVirial.getEnergyPointer(this->strExcl));
cudaCheck(cudaGetLastError());
}
}
//
// Calculates direct force
//
template <typename AT, typename CT>
void CudaPMEDirectForceBlock<AT, CT>::calc_force(const float4 *xyzq,
const CudaNeighborListBuild<32>& nlist,
const bool calc_energy,
const bool calc_virial,
const int stride, AT *force,
cudaStream_t stream) {
#ifdef USE_TEXTURE_OBJECTS
if (!this->vdwParamTexObjActive) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, vdwParamTexObj must be created" << std::endl;
exit(1);
}
#else
if (!get_vdwparam_texref_bound()) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, vdwparam_texref must be bound"
<< std::endl;
exit(1);
}
#endif
#ifndef NUMBLOCK_LARGE
if (cudaBlock.getNumBlock() > 512) {
std::cerr << "CudaPMEDirectForceBlock<AT, CT>::calc_force, numBlock > 512 is not currently allowed" << std::endl;
exit(1);
}
#endif
// Re-allocate biflam and biflam2
reallocate<AT>(&biflam, &biflamLen, cudaBlock.getNumBlock());
reallocate<AT>(&biflam2, &biflam2Len, cudaBlock.getNumBlock());
// Clear biflam and biflam2
clear_gpu_array<AT>(biflam, cudaBlock.getNumBlock(), stream);
clear_gpu_array<AT>(biflam2, cudaBlock.getNumBlock(), stream);
if (nlist.get_n_ientry() == 0) return;
int vdw_model_loc = this->calc_vdw ? this->vdw_model : NONE;
int elec_model_loc = this->calc_elec ? this->elec_model : NONE;
if (elec_model_loc == NONE && vdw_model_loc == NONE) return;
int nwarp = 2;
if (get_cuda_arch() < 300) {
nwarp = 2;
} else {
nwarp = 4;
}
int nthread = warpsize*nwarp;
int nblock_tot = (nlist.get_n_ientry()-1)/(nthread/warpsize)+1;
int shmem_size = 0;
// (sh_xi, sh_yi, sh_zi, sh_qi, sh_vdwtypei, sh_blocktypei)
if (get_cuda_arch() < 300)
shmem_size += (nthread/warpsize)*tilesize*(sizeof(float)*4 + sizeof(int) + sizeof(int));
#ifndef NUMBLOCK_LARGE
shmem_size += cudaBlock.getNumBlock()*sizeof(float);
#endif
// (sh_fix, sh_fiy, sh_fiz)
shmem_size += (nthread/warpsize)*warpsize*sizeof(AT)*3;
// If no texture fetch for vdwparam:
//shmem_size += nvdwparam*sizeof(float);
if (calc_energy) shmem_size = max(shmem_size, (int)(nthread*sizeof(double)*2));
if (calc_virial) shmem_size = max(shmem_size, (int)(nthread*sizeof(double)*3));
calcForceKernelChoice<AT,CT>(nblock_tot, nthread, shmem_size, stream,
vdw_model_loc, elec_model_loc, calc_energy, calc_virial,
nlist, this->vdwparam, this->nvdwparam, this->vdwtype,
#ifdef USE_TEXTURE_OBJECTS
this->vdwParamTexObj,
#endif
xyzq, stride, force,
this->energyVirial.getVirialPointer(),
this->energyVirial.getEnergyPointer(this->strVdw),
this->energyVirial.getEnergyPointer(this->strElec),
&cudaBlock, this->biflam, this->biflam2);
// Convert biflam and biflam2 into double precision and add to cudaBlock.biflam -arrays
if (calc_energy) {
mergeNonbondResultsKernel<<< (cudaBlock.getNumBlock()-1)/64+1, 64, 0, stream >>>
(cudaBlock.getNumBlock(), biflam, biflam2, cudaBlock.getBiflam(), cudaBlock.getBiflam2());
cudaCheck(cudaGetLastError());
}
}
//
// Explicit instances of CudaPMEDirectForceBlock
//
template class CudaPMEDirectForceBlock<long long int, float>;
|
2536e4648e223d9108fdd1808aa3caae1cb5d432.hip | // !!! This is a file automatically generated by hipify!!!
/*Raul P. Pelaez 2017. PairForces definition.
PairForces Module is an interactor that computes short range forces.
Computes the interaction between neighbour particles (pairs of particles closer than rcut).
If the value of rcut reaches a certain threshold (fewer than four cells fit in the box along some dimension), the computation falls back to an all-pairs N-body interaction instead of a neighbour list.
For that, it uses a NeighbourList or an Nbody interaction and computes the force given by Potential for each pair of particles. It sums the force for all neighbours of every particle.
See https://github.com/RaulPPelaez/UAMMD/wiki/Pair-Forces for more info.
See misc/Potential.cuh and https://github.com/RaulPPelaez/UAMMD/wiki/Potential for more info on potentials and how to implement them.
*/
#include<third_party/hipcub/hipcub.hpp>
#include"utils/GPUUtils.cuh"
namespace uammd{
template<class Potential, class NL>
PairForces<Potential, NL>::PairForces(shared_ptr<ParticleData> pd,
shared_ptr<ParticleGroup> pg,
shared_ptr<System> sys,
Parameters par,
shared_ptr<Potential> pot):
Interactor(pd, pg, sys,
"PairForces/" +
stringUtils::removePattern(type_name<NL>(), "uammd::") +
"/" +
stringUtils::removePattern(type_name<Potential>(), "uammd::")),
box(par.box),
pot(pot),
nl(nullptr),
nb(nullptr)
{
this->setDelegate(pot.get());
}
template<class Potential, class NL>
void PairForces<Potential, NL>::sumForce(hipStream_t st){
sys->log<System::DEBUG1>("[PairForces] Summing forces");
this->rcut = pot->getCutOff();
sys->log<System::DEBUG3>("[PairForces] Using cutOff: %f", this->rcut);
bool useNeighbourList = true;
//If the cutoff distance is too high, fall back to an NBody interaction
int3 ncells = make_int3(box.boxSize/rcut+0.5);
if(ncells.x <=3 || ncells.y <= 3 || ncells.z <=3){
useNeighbourList = false;
}
auto ft = pot->getForceTransverser(box, pd);
if(useNeighbourList){
if(!nl){
//A neighbour list must know just my system information at construction
nl = std::make_shared<NL>(pd, pg, sys);
}
//Update neighbour list. It is smart enough to do it only when it is necessary,
// so do not fear calling it several times.
nl->updateNeighbourList(box, rcut, st);
sys->log<System::DEBUG3>("[PairForces] Transversing neighbour list");
nl->transverseList(ft, st);
}
else{
if(!nb){
nb = std::make_shared<NBody>(pd, pg, sys);
}
sys->log<System::DEBUG3>("[PairForces] Transversing NBody");
nb->transverse(ft, st);
}
}
template<class Potential, class NL>
real PairForces<Potential, NL>::sumEnergy(){
hipStream_t st = 0;
sys->log<System::DEBUG1>("[PairForces] Summing Energy");
this->rcut = pot->getCutOff();
sys->log<System::DEBUG3>("[PairForces] Using cutOff: %f", this->rcut);
bool useNeighbourList = true;
//If the cutoff distance is too high, fall back to an NBody interaction
int3 ncells = make_int3(box.boxSize/rcut+0.5);
if(ncells.x <=3 || ncells.y <= 3 || ncells.z <=3){
useNeighbourList = false;
}
auto ft = pot->getEnergyTransverser(box, pd);
if(useNeighbourList){
if(!nl){
//A neighbour list must know just my system information at construction
nl = std::make_shared<NL>(pd, pg, sys);
}
//Update neighbour list. It is smart enough to do it only when it is necessary,
// so do not fear calling it several times.
nl->updateNeighbourList(box, rcut, st);
sys->log<System::DEBUG3>("[PairForces] Transversing neighbour list");
nl->transverseList(ft, st);
}
else{
if(!nb){
nb = std::make_shared<NBody>(pd, pg, sys);
}
sys->log<System::DEBUG3>("[PairForces] Transversing NBody");
nb->transverse(ft, st);
}
return 0;
}
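// Note: the neighbour-list fallback above triggers whenever the cell grid would have 3 or fewer cells in
// any dimension. With illustrative numbers, a cubic box of side 32 and rcut = 12 give
// ncells = make_int3(32/12 + 0.5) = (3,3,3), so both sumForce and sumEnergy transverse the NBody
// interactor instead of building a neighbour list.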
} | 2536e4648e223d9108fdd1808aa3caae1cb5d432.cu | /*Raul P. Pelaez 2017. PairForces definition.
PairForces Module is an interactor that computes short range forces.
Computes the interaction between neighbour particles (pairs of particles closer than rcut).
If the value of rcut reaches a certain threshold (fewer than four cells fit in the box along some dimension), the computation falls back to an all-pairs N-body interaction instead of a neighbour list.
For that, it uses a NeighbourList or an Nbody interaction and computes the force given by Potential for each pair of particles. It sums the force for all neighbours of every particle.
See https://github.com/RaulPPelaez/UAMMD/wiki/Pair-Forces for more info.
See misc/Potential.cuh and https://github.com/RaulPPelaez/UAMMD/wiki/Potential for more info on potentials and how to implement them.
*/
#include<third_party/cub/cub.cuh>
#include"utils/GPUUtils.cuh"
namespace uammd{
template<class Potential, class NL>
PairForces<Potential, NL>::PairForces(shared_ptr<ParticleData> pd,
shared_ptr<ParticleGroup> pg,
shared_ptr<System> sys,
Parameters par,
shared_ptr<Potential> pot):
Interactor(pd, pg, sys,
"PairForces/" +
stringUtils::removePattern(type_name<NL>(), "uammd::") +
"/" +
stringUtils::removePattern(type_name<Potential>(), "uammd::")),
box(par.box),
pot(pot),
nl(nullptr),
nb(nullptr)
{
this->setDelegate(pot.get());
}
template<class Potential, class NL>
void PairForces<Potential, NL>::sumForce(cudaStream_t st){
sys->log<System::DEBUG1>("[PairForces] Summing forces");
this->rcut = pot->getCutOff();
sys->log<System::DEBUG3>("[PairForces] Using cutOff: %f", this->rcut);
bool useNeighbourList = true;
//If the cutoff distance is too high, fall back to an NBody interaction
int3 ncells = make_int3(box.boxSize/rcut+0.5);
if(ncells.x <=3 || ncells.y <= 3 || ncells.z <=3){
useNeighbourList = false;
}
auto ft = pot->getForceTransverser(box, pd);
if(useNeighbourList){
if(!nl){
//A neighbour list must know just my system information at construction
nl = std::make_shared<NL>(pd, pg, sys);
}
//Update neighbour list. It is smart enough to do it only when it is necessary,
// so do not fear calling it several times.
nl->updateNeighbourList(box, rcut, st);
sys->log<System::DEBUG3>("[PairForces] Transversing neighbour list");
nl->transverseList(ft, st);
}
else{
if(!nb){
nb = std::make_shared<NBody>(pd, pg, sys);
}
sys->log<System::DEBUG3>("[PairForces] Transversing NBody");
nb->transverse(ft, st);
}
}
template<class Potential, class NL>
real PairForces<Potential, NL>::sumEnergy(){
cudaStream_t st = 0;
sys->log<System::DEBUG1>("[PairForces] Summing Energy");
this->rcut = pot->getCutOff();
sys->log<System::DEBUG3>("[PairForces] Using cutOff: %f", this->rcut);
bool useNeighbourList = true;
//If the cutoff distance is too high, fall back to an NBody interaction
int3 ncells = make_int3(box.boxSize/rcut+0.5);
if(ncells.x <=3 || ncells.y <= 3 || ncells.z <=3){
useNeighbourList = false;
}
auto ft = pot->getEnergyTransverser(box, pd);
if(useNeighbourList){
if(!nl){
//A neighbour list must know just my system information at construction
nl = std::make_shared<NL>(pd, pg, sys);
}
//Update neighbour list. It is smart enough to do it only when it is necessary,
// so do not fear calling it several times.
nl->updateNeighbourList(box, rcut, st);
sys->log<System::DEBUG3>("[PairForces] Transversing neighbour list");
nl->transverseList(ft, st);
}
else{
if(!nb){
nb = std::make_shared<NBody>(pd, pg, sys);
}
sys->log<System::DEBUG3>("[PairForces] Transversing NBody");
nb->transverse(ft, st);
}
return 0;
}
} |
28d2a79afbc342e8a1977f6b43d4c4e049e614aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HelpersHost.cu.h"
#include <stdio.h>
#define NUM 4
__global__ void
coalescedKernel( int* d_in,
int* d_out,
unsigned int d_size
) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < d_size) {
int k = d_in[gid];
int tuple[NUM];
for (int i = 0; i < NUM; i++) {
tuple[i] = 0;
}
tuple[k] = 1;
for (int i = 0; i < NUM; i++) {
d_out[d_size*i+gid] = tuple[i];
}
}
}
__global__ void
nonCoalescedKernel( int* d_in,
int* d_out,
unsigned int d_size
) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < d_size) {
int k = d_in[gid];
int tuple[NUM];
for (int i = 0; i < NUM; i++) {
tuple[i] = 0;
}
tuple[k] = 1;
for (int i = 0; i < NUM; i++) {
d_out[NUM*gid+i] = tuple[i];
}
}
}
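// Note: the two kernels above do identical work and differ only in the output layout.
// coalescedKernel stores in structure-of-arrays order, d_out[d_size*i + gid], so the threads of a warp
// write consecutive ints for each tuple slot i and the stores coalesce into few memory transactions.
// nonCoalescedKernel stores in array-of-structures order, d_out[NUM*gid + i], so neighbouring threads
// write addresses NUM elements apart and each warp store is split across several transactions,
// which is the difference the timing comparison in main() measures.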
int main(int argc, char** argv) {
if (argc != 2) {
printf("The program takes <num_elems> as argument!\n");
return EXIT_FAILURE;
}
struct timeval t_diff, t_start_coalesced, t_end_coalesced,
t_start_non_coalesced, t_end_non_coalesced;
unsigned long int elapsed_coalesced, elapsed_non_coalesced;
const unsigned int num_elems = strtoul(argv[1], NULL, 10); //40000000; //50332001;
unsigned int mem_size = num_elems * sizeof(int);
// Allocate memory.
int* h_in = (int*) malloc(mem_size);
int* h_out_coalesced = (int*) malloc(NUM*mem_size);
int* h_out_non_coalesced = (int*) malloc(NUM*mem_size);
{ // Initialize array.
std::srand(33);
for(unsigned int i = 0; i < num_elems; i++) {
h_in[i] = std::rand() % NUM;
}
}
int *d_in, *d_out_coalesced, *d_out_non_coalesced;
{ // Device allocation.
hipMalloc((void**)&d_in, mem_size);
hipMalloc((void**)&d_out_coalesced, NUM*mem_size);
hipMalloc((void**)&d_out_non_coalesced, NUM*mem_size);
// Copy host memory to device.
hipMemcpy(d_in, h_in, mem_size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
// Sizes for the kernels.
unsigned int block_size = getBlockSize(num_elems);
unsigned int num_blocks = getNumBlocks(num_elems, block_size);
// Call the kernels.
gettimeofday(&t_start_coalesced, NULL);
hipLaunchKernelGGL(( coalescedKernel), dim3(num_blocks), dim3(block_size), 0, 0, d_in, d_out_coalesced, num_elems);
hipDeviceSynchronize();
gettimeofday(&t_end_coalesced, NULL);
gettimeofday(&t_start_non_coalesced, NULL);
hipLaunchKernelGGL(( nonCoalescedKernel), dim3(num_blocks), dim3(block_size), 0, 0, d_in, d_out_non_coalesced, num_elems);
hipDeviceSynchronize();
gettimeofday(&t_end_non_coalesced, NULL);
timeval_subtract(&t_diff, &t_end_coalesced, &t_start_coalesced);
elapsed_coalesced = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Coalesced runtime: %lu microsecs\n", elapsed_coalesced);
timeval_subtract(&t_diff, &t_end_non_coalesced, &t_start_non_coalesced);
elapsed_non_coalesced = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Non-coalesced runtime: %lu microsecs\n", elapsed_non_coalesced);
printf("Percent improvement: %.2f%%\n", (1.0-(float)elapsed_coalesced/elapsed_non_coalesced)*100.0);
// Copy result back to host.
hipMemcpy(h_out_coalesced, d_out_coalesced, NUM*mem_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipMemcpy(h_out_non_coalesced, d_out_non_coalesced, NUM*mem_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// Validation.
bool success = true;
for (int gid = 0; gid < num_elems; gid++) {
int k = h_in[gid];
for (int i = 0; i < NUM; i++) {
int element_coalesced = h_out_coalesced[num_elems*i+gid];
int element_non_coalesced = h_out_non_coalesced[NUM*gid+i];
if (i == k) {
if (element_coalesced != 1) {
printf("Coalesced violation: %d should be 1, gid: %d, i: %d\n", element_coalesced, gid, i);
success = false;
}
if (element_non_coalesced != 1) {
printf("Non-coalesced violation: %d should be 1, gid: %d, i: %d\n", element_non_coalesced, gid, i);
success = false;
}
}
else {
if (element_coalesced != 0) {
printf("Coalesced violation: %d should be 0, gid: %d, i: %d\n", element_coalesced, gid, i);
success = false;
}
if (element_non_coalesced != 0) {
printf("Non-coalesced violation: %d should be 0, gid: %d, i: %d\n", element_non_coalesced, gid, i);
success = false;
}
}
}
}
if (success) {
printf("CoalescedTest on %d elems: VALID RESULT!\n", num_elems);
}
else {
printf("CoalescedTest on %d elems: INVALID RESULT!\n", num_elems);
}
// Free device memory.
hipFree(d_in );
hipFree(d_out_coalesced);
hipFree(d_out_non_coalesced);
// Cleanup memory.
free(h_in );
free(h_out_coalesced);
free(h_out_non_coalesced);
return 0;
}
| 28d2a79afbc342e8a1977f6b43d4c4e049e614aa.cu | #include "HelpersHost.cu.h"
#include <stdio.h>
#define NUM 4
__global__ void
coalescedKernel( int* d_in,
int* d_out,
unsigned int d_size
) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < d_size) {
int k = d_in[gid];
int tuple[NUM];
for (int i = 0; i < NUM; i++) {
tuple[i] = 0;
}
tuple[k] = 1;
for (int i = 0; i < NUM; i++) {
d_out[d_size*i+gid] = tuple[i];
}
}
}
__global__ void
nonCoalescedKernel( int* d_in,
int* d_out,
unsigned int d_size
) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < d_size) {
int k = d_in[gid];
int tuple[NUM];
for (int i = 0; i < NUM; i++) {
tuple[i] = 0;
}
tuple[k] = 1;
for (int i = 0; i < NUM; i++) {
d_out[NUM*gid+i] = tuple[i];
}
}
}
int main(int argc, char** argv) {
if (argc != 2) {
printf("The program takes <num_elems> as argument!\n");
return EXIT_FAILURE;
}
struct timeval t_diff, t_start_coalesced, t_end_coalesced,
t_start_non_coalesced, t_end_non_coalesced;
unsigned long int elapsed_coalesced, elapsed_non_coalesced;
const unsigned int num_elems = strtoul(argv[1], NULL, 10); //40000000; //50332001;
unsigned int mem_size = num_elems * sizeof(int);
// Allocate memory.
int* h_in = (int*) malloc(mem_size);
int* h_out_coalesced = (int*) malloc(NUM*mem_size);
int* h_out_non_coalesced = (int*) malloc(NUM*mem_size);
{ // Initialize array.
std::srand(33);
for(unsigned int i = 0; i < num_elems; i++) {
h_in[i] = std::rand() % NUM;
}
}
int *d_in, *d_out_coalesced, *d_out_non_coalesced;
{ // Device allocation.
cudaMalloc((void**)&d_in, mem_size);
cudaMalloc((void**)&d_out_coalesced, NUM*mem_size);
cudaMalloc((void**)&d_out_non_coalesced, NUM*mem_size);
// Copy host memory to device.
cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
cudaThreadSynchronize();
}
// Sizes for the kernels.
unsigned int block_size = getBlockSize(num_elems);
unsigned int num_blocks = getNumBlocks(num_elems, block_size);
// Call the kernels.
gettimeofday(&t_start_coalesced, NULL);
coalescedKernel<<<num_blocks, block_size>>>(d_in, d_out_coalesced, num_elems);
cudaThreadSynchronize();
gettimeofday(&t_end_coalesced, NULL);
gettimeofday(&t_start_non_coalesced, NULL);
nonCoalescedKernel<<<num_blocks, block_size>>>(d_in, d_out_non_coalesced, num_elems);
cudaThreadSynchronize();
gettimeofday(&t_end_non_coalesced, NULL);
timeval_subtract(&t_diff, &t_end_coalesced, &t_start_coalesced);
elapsed_coalesced = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Coalesced runtime: %lu microsecs\n", elapsed_coalesced);
timeval_subtract(&t_diff, &t_end_non_coalesced, &t_start_non_coalesced);
elapsed_non_coalesced = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Non-coalesced runtime: %lu microsecs\n", elapsed_non_coalesced);
printf("Percent improvement: %.2f%%\n", (1.0-(float)elapsed_coalesced/elapsed_non_coalesced)*100.0);
// Copy result back to host.
cudaMemcpy(h_out_coalesced, d_out_coalesced, NUM*mem_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaMemcpy(h_out_non_coalesced, d_out_non_coalesced, NUM*mem_size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
// Validation.
bool success = true;
for (int gid = 0; gid < num_elems; gid++) {
int k = h_in[gid];
for (int i = 0; i < NUM; i++) {
int element_coalesced = h_out_coalesced[num_elems*i+gid];
int element_non_coalesced = h_out_non_coalesced[NUM*gid+i];
if (i == k) {
if (element_coalesced != 1) {
printf("Coalesced violation: %d should be 1, gid: %d, i: %d\n", element_coalesced, gid, i);
success = false;
}
if (element_non_coalesced != 1) {
printf("Non-coalesced violation: %d should be 1, gid: %d, i: %d\n", element_non_coalesced, gid, i);
success = false;
}
}
else {
if (element_coalesced != 0) {
printf("Coalesced violation: %d should be 0, gid: %d, i: %d\n", element_coalesced, gid, i);
success = false;
}
if (element_non_coalesced != 0) {
printf("Non-coalesced violation: %d should be 0, gid: %d, i: %d\n", element_non_coalesced, gid, i);
success = false;
}
}
}
}
if (success) {
printf("CoalescedTest on %d elems: VALID RESULT!\n", num_elems);
}
else {
printf("CoalescedTest on %d elems: INVALID RESULT!\n", num_elems);
}
// Free device memory.
cudaFree(d_in );
cudaFree(d_out_coalesced);
cudaFree(d_out_non_coalesced);
// Cleanup memory.
free(h_in );
free(h_out_coalesced);
free(h_out_non_coalesced);
return 0;
}
|
f373ee49e3c8c1abe1de250e27b909ea1403f836.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Shawn Sang-Ha Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
// AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS
#define CUDA_SAFE_CALL( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define THREADS_PER_BLOCK 512
#define MAXBLOCKS 65536
#define CUDATIME
bool change_center_table = true;
bool change_p = true;
bool change_switch_membership = true;
// host memory
float *work_mem_h;
float *coord_h;
// device memory
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
Point *p;
static int iter = 0; // counter for total # of iterations
//=======================================
// Euclidean Distance
//=======================================
__device__ float
d_dist(int p1, int p2, int num, int dim, float *coord_d)
{
float retval = 0.0;
for(int i = 0; i < dim; i++){
float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2];
retval += tmp * tmp;
}
return retval;
}
//=======================================
// Kernel - Compute Cost
//=======================================
__global__ void
kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d)
{
// block ID and global thread ID
const int bid = blockIdx.x + gridDim.x * blockIdx.y;
const int tid = blockDim.x * bid + threadIdx.x;
if(tid < num)
{
float *lower = &work_mem_d[tid*stride];
// cost between this point and point[x]: squared Euclidean distance multiplied by weight
float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight;
// if computed cost is less then original (it saves), mark it as to reassign
if ( x_cost < p[tid].cost )
{
switch_membership_d[tid] = 1;
lower[K] += x_cost - p[tid].cost;
}
// if computed cost is larger, save the difference
else
{
lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost;
}
}
}
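//=======================================
// Note on the work_mem layout
//=======================================
// Each data point (one thread) owns a segment of stride = K + 1 floats inside work_mem_d:
// slots [0, K), indexed through center_table_d, accumulate the saving obtained by closing an existing
// center, and slot K accumulates the cost change of reassigning the point to the candidate center x.
// The extra (nThread + 1)-th segment is not written by the kernel; the host reuses it as gl_lower
// during the serial part of pgain().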
//=======================================
// Allocate Device Memory
//=======================================
void allocDevMem(int num, int dim)
{
CUDA_SAFE_CALL( hipMalloc((void**) ¢er_table_d, num * sizeof(int)) );
CUDA_SAFE_CALL( hipMalloc((void**) &switch_membership_d, num * sizeof(bool)) );
CUDA_SAFE_CALL( hipMalloc((void**) &p, num * sizeof(Point)) );
CUDA_SAFE_CALL( hipMalloc((void**) &coord_d, num * dim * sizeof(float)) );
}
//=======================================
// Allocate Host Memory
//=======================================
void allocHostMem(int num, int dim)
{
coord_h = (float*) malloc( num * dim * sizeof(float) );
}
//=======================================
// Free Device Memory
//=======================================
void freeDevMem()
{
CUDA_SAFE_CALL( hipFree(center_table_d) );
CUDA_SAFE_CALL( hipFree(switch_membership_d) );
CUDA_SAFE_CALL( hipFree(p) );
CUDA_SAFE_CALL( hipFree(coord_d) );
}
//=======================================
// Free Host Memory
//=======================================
void freeHostMem()
{
free(coord_h);
}
//=======================================
// pgain Entry - CUDA SETUP + CUDA CALL
//=======================================
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged,
double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t)
{
#ifdef CUDATIME
float tmp_t;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipError_t error;
int stride = *numcenters + 1; // size of each work_mem segment
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
int nThread = num; // number of threads == number of data points
//=========================================
// ALLOCATE HOST MEMORY + DATA PREPARATION
//=========================================
work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) );
// Only on the first iteration
if(iter == 0)
{
allocHostMem(num, dim);
}
// build center-index table
int count = 0;
for( int i=0; i<num; i++)
{
if( is_center[i] )
{
change_center_table = true;
center_table[i] = count++;
}
}
// Extract 'coord'
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
for(int i=0; i<dim; i++)
{
for(int j=0; j<num; j++)
{
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
}
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// ALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( hipMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) );
// Only on the first iteration
if( iter == 0 )
{
allocDevMem(num, dim);
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*alloc_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// CPU-TO-GPU MEMORY COPY
//=======================================
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
CUDA_SAFE_CALL( hipMemcpy(coord_d, coord_h, num * dim * sizeof(float), hipMemcpyHostToDevice) );
}
if (change_center_table) {
CUDA_SAFE_CALL( hipMemcpy(center_table_d, center_table, num * sizeof(int), hipMemcpyHostToDevice) );
}
change_center_table = false;
if (change_p) {
CUDA_SAFE_CALL( hipMemcpy(p, points->p, num * sizeof(Point), hipMemcpyHostToDevice) );
}
change_p = false;
if (change_switch_membership) {
CUDA_SAFE_CALL( hipMemset((void*) switch_membership_d, 0, num * sizeof(bool)) );
}
change_switch_membership = false;
CUDA_SAFE_CALL( hipMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*cpu_to_gpu_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// KERNEL: CALCULATE COST
//=======================================
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
hipLaunchKernelGGL(( kernel_compute_cost), dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0,
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // in: data point array
K, // in: number of centers
stride, // in: size of each work_mem segment
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
hipDeviceSynchronize();
// error check
error = hipGetLastError();
if (error != hipSuccess)
{
printf("kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*kernel_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// GPU-TO-CPU MEMORY COPY
//=======================================
CUDA_SAFE_CALL( hipMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*gpu_to_cpu_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// CPU (SERIAL) WORK
//=======================================
int number_of_centers_to_close = 0;
float gl_cost_of_opening_x = z;
float *gl_lower = &work_mem_h[stride * nThread];
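// gl_lower aliases the final stride-sized segment of work_mem_h; for each existing
// center it holds the net saving obtained by closing that center once x is opened.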
// compute the number of centers to close if we are to open a center at x
for(int i=0; i < num; i++)
{
if( is_center[i] )
{
float low = z;
for( int j = 0; j < num; j++ )
{
low += work_mem_h[ j*stride + center_table[i] ];
}
gl_lower[center_table[i]] = low;
if ( low > 0 )
{
++number_of_centers_to_close;
work_mem_h[i*stride+K] -= low;
}
}
gl_cost_of_opening_x += work_mem_h[i*stride+K];
}
for(int i = 0; i < num; i++)
{
if ( switch_membership[i] ) {
change_switch_membership = true;
}
}
//if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing
if ( gl_cost_of_opening_x < 0 )
{
for(int i = 0; i < num; i++)
{
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center )
{
change_p = true;
points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight;
points->p[i].assign = x;
}
}
for(int i = 0; i < num; i++)
{
if( is_center[i] && gl_lower[center_table[i]] > 0 )
{
is_center[i] = false;
}
}
if( x >= 0 && x < num)
{
is_center[x] = true;
}
*numcenters = *numcenters + 1 - number_of_centers_to_close;
}
else
{
gl_cost_of_opening_x = 0;
}
//=======================================
// DEALLOCATE HOST MEMORY
//=======================================
free(work_mem_h);
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// DEALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( hipFree(work_mem_d) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*free_t += (double) tmp_t;
#endif
iter++;
return -gl_cost_of_opening_x;
}
| f373ee49e3c8c1abe1de250e27b909ea1403f836.cu | /***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Shawn Sang-Ha Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
// AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS
#define CUDA_SAFE_CALL( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define THREADS_PER_BLOCK 512
#define MAXBLOCKS 65536
#define CUDATIME
bool change_center_table = true;
bool change_p = true;
bool change_switch_membership = true;
// host memory
float *work_mem_h;
float *coord_h;
// device memory
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
Point *p;
static int iter = 0; // counter for total# of iteration
//=======================================
// Euclidean Distance
//=======================================
__device__ float
d_dist(int p1, int p2, int num, int dim, float *coord_d)
{
float retval = 0.0;
for(int i = 0; i < dim; i++){
float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2];
retval += tmp * tmp;
}
return retval;
}
//=======================================
// Kernel - Compute Cost
//=======================================
__global__ void
kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d)
{
// block ID and global thread ID
const int bid = blockIdx.x + gridDim.x * blockIdx.y;
const int tid = blockDim.x * bid + threadIdx.x;
if(tid < num)
{
float *lower = &work_mem_d[tid*stride];
// cost between this point and point[x]: euclidean distance multiplied by weight
float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight;
// if computed cost is less then original (it saves), mark it as to reassign
if ( x_cost < p[tid].cost )
{
switch_membership_d[tid] = 1;
lower[K] += x_cost - p[tid].cost;
}
// if computed cost is larger, save the difference
else
{
lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost;
}
}
}
//=======================================
// Allocate Device Memory
//=======================================
void allocDevMem(int num, int dim)
{
CUDA_SAFE_CALL( cudaMalloc((void**) ¢er_table_d, num * sizeof(int)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &switch_membership_d, num * sizeof(bool)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &p, num * sizeof(Point)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &coord_d, num * dim * sizeof(float)) );
}
//=======================================
// Allocate Host Memory
//=======================================
void allocHostMem(int num, int dim)
{
coord_h = (float*) malloc( num * dim * sizeof(float) );
}
//=======================================
// Free Device Memory
//=======================================
void freeDevMem()
{
CUDA_SAFE_CALL( cudaFree(center_table_d) );
CUDA_SAFE_CALL( cudaFree(switch_membership_d) );
CUDA_SAFE_CALL( cudaFree(p) );
CUDA_SAFE_CALL( cudaFree(coord_d) );
}
//=======================================
// Free Host Memory
//=======================================
void freeHostMem()
{
free(coord_h);
}
//=======================================
// pgain Entry - CUDA SETUP + CUDA CALL
//=======================================
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged,
double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t)
{
#ifdef CUDATIME
float tmp_t;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
cudaError_t error;
int stride = *numcenters + 1; // size of each work_mem segment
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
int nThread = num; // number of threads == number of data points
//=========================================
// ALLOCATE HOST MEMORY + DATA PREPARATION
//=========================================
work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) );
// Only on the first iteration
if(iter == 0)
{
allocHostMem(num, dim);
}
// build center-index table
int count = 0;
for( int i=0; i<num; i++)
{
if( is_center[i] )
{
change_center_table = true;
center_table[i] = count++;
}
}
// Extract 'coord'
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
for(int i=0; i<dim; i++)
{
for(int j=0; j<num; j++)
{
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
}
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// ALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( cudaMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) );
// Only on the first iteration
if( iter == 0 )
{
allocDevMem(num, dim);
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*alloc_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// CPU-TO-GPU MEMORY COPY
//=======================================
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
CUDA_SAFE_CALL( cudaMemcpy(coord_d, coord_h, num * dim * sizeof(float), cudaMemcpyHostToDevice) );
}
if (change_center_table) {
CUDA_SAFE_CALL( cudaMemcpy(center_table_d, center_table, num * sizeof(int), cudaMemcpyHostToDevice) );
}
change_center_table = false;
if (change_p) {
CUDA_SAFE_CALL( cudaMemcpy(p, points->p, num * sizeof(Point), cudaMemcpyHostToDevice) );
}
change_p = false;
if (change_switch_membership) {
CUDA_SAFE_CALL( cudaMemset((void*) switch_membership_d, 0, num * sizeof(bool)) );
}
change_switch_membership = false;
CUDA_SAFE_CALL( cudaMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*cpu_to_gpu_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// KERNEL: CALCULATE COST
//=======================================
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
kernel_compute_cost<<<grid_size, THREADS_PER_BLOCK>>>(
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // in: data point array
K, // in: number of centers
stride, // in: size of each work_mem segment
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
cudaThreadSynchronize();
// error check
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*kernel_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// GPU-TO-CPU MEMORY COPY
//=======================================
CUDA_SAFE_CALL( cudaMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), cudaMemcpyDeviceToHost) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*gpu_to_cpu_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// CPU (SERIAL) WORK
//=======================================
int number_of_centers_to_close = 0;
float gl_cost_of_opening_x = z;
float *gl_lower = &work_mem_h[stride * nThread];
// compute the number of centers to close if we are to open a center at x
for(int i=0; i < num; i++)
{
if( is_center[i] )
{
float low = z;
for( int j = 0; j < num; j++ )
{
low += work_mem_h[ j*stride + center_table[i] ];
}
gl_lower[center_table[i]] = low;
if ( low > 0 )
{
++number_of_centers_to_close;
work_mem_h[i*stride+K] -= low;
}
}
gl_cost_of_opening_x += work_mem_h[i*stride+K];
}
for(int i = 0; i < num; i++)
{
if ( switch_membership[i] ) {
change_switch_membership = true;
}
}
//if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing
if ( gl_cost_of_opening_x < 0 )
{
for(int i = 0; i < num; i++)
{
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center )
{
change_p = true;
points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight;
points->p[i].assign = x;
}
}
for(int i = 0; i < num; i++)
{
if( is_center[i] && gl_lower[center_table[i]] > 0 )
{
is_center[i] = false;
}
}
if( x >= 0 && x < num)
{
is_center[x] = true;
}
*numcenters = *numcenters + 1 - number_of_centers_to_close;
}
else
{
gl_cost_of_opening_x = 0;
}
//=======================================
// DEALLOCATE HOST MEMORY
//=======================================
free(work_mem_h);
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// DEALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( cudaFree(work_mem_d) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*free_t += (double) tmp_t;
#endif
iter++;
return -gl_cost_of_opening_x;
}
|
8ce50a0c3be7c44f41b22fddf164041fe536a024.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_ltef (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
} | 8ce50a0c3be7c44f41b22fddf164041fe536a024.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_ltef (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
} |
6c3711fef05c04fa6ac9cc0c3aae810a9c899cc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void rdiv_scalar_double(int n,int idx, float dx,float *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dx / dy[i];
}
} | 6c3711fef05c04fa6ac9cc0c3aae810a9c899cc5.cu | #include "includes.h"
extern "C"
__global__ void rdiv_scalar_double(int n,int idx, float dx,float *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dx / dy[i];
}
} |
244b8e0b3ac27e8f1cfb112b1cd599615588f7ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl2.cu, normal z -> s, Tue Aug 30 09:38:31 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( slascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( slascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
}
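// Usage sketch (illustrative): scale each row i of a device matrix dA by dD[i].
// Assumes dD and dA already reside on the device and queue is a valid magma_queue_t:
//
//     magma_int_t info;
//     magmablas_slascl2_q( MagmaFull, m, n, dD, dA, ldda, queue, &info );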
| 244b8e0b3ac27e8f1cfb112b1cd599615588f7ba.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl2.cu, normal z -> s, Tue Aug 30 09:38:31 2016
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
slascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
slascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
slascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
}
|
d92dcf8a2ad69303f6e36d83932a8a420834aa35.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <roctracer/roctx.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <pthread.h>
//#include "utils.h"
#define BLOCKS 80
#define THREADS 512
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(0); \
} \
}
__global__ void bar() {
__shared__ int a[THREADS];
int tid = threadIdx.x;
for (int i = 0; i < 1000000; i++) {
a[tid] += tid + i;
}
// if (tid==10) printf("%d\n", a[tid]);
}
int foo() {
int sum = 0;
for(int i = 0; i < 1000000; i++) {
sum += i;
}
return sum;
}
int main(void) {
hipProfilerStart();
nvtxNameOsThread(pthread_self(), "MAIN");
roctxRangePush("Calling foo");
printf("%d\n", foo());
roctxRangePop();
roctxRangePush("Calling bar1");
hipLaunchKernelGGL(( bar), dim3(BLOCKS), dim3(THREADS), 0, 0, );
roctxRangePop();
cudaCheckError();
roctxRangePush("Calling bar2");
hipLaunchKernelGGL(( bar), dim3(BLOCKS), dim3(THREADS), 0, 0, );
roctxRangePop();
cudaCheckError();
roctxRangePush("Calling bar3");
hipLaunchKernelGGL(( bar), dim3(BLOCKS), dim3(THREADS), 0, 0, );
roctxRangePop();
cudaCheckError();
hipDeviceSynchronize();
hipProfilerStop();
return 0;
}
| d92dcf8a2ad69303f6e36d83932a8a420834aa35.cu | #include <cuda.h>
#include <nvToolsExt.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <pthread.h>
//#include "utils.h"
#define BLOCKS 80
#define THREADS 512
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(0); \
} \
}
__global__ void bar() {
__shared__ int a[THREADS];
int tid = threadIdx.x;
for (int i = 0; i < 1000000; i++) {
a[tid] += tid + i;
}
// if (tid==10) printf("%d\n", a[tid]);
}
int foo() {
int sum = 0;
for(int i = 0; i < 1000000; i++) {
sum += i;
}
return sum;
}
int main(void) {
cudaProfilerStart();
nvtxNameOsThread(pthread_self(), "MAIN");
nvtxRangePush("Calling foo");
printf("%d\n", foo());
nvtxRangePop();
nvtxRangePush("Calling bar1");
bar<<<BLOCKS, THREADS>>>();
nvtxRangePop();
cudaCheckError();
nvtxRangePush("Calling bar2");
bar<<<BLOCKS, THREADS>>>();
nvtxRangePop();
cudaCheckError();
nvtxRangePush("Calling bar3");
bar<<<BLOCKS, THREADS>>>();
nvtxRangePop();
cudaCheckError();
cudaDeviceSynchronize();
cudaProfilerStop();
return 0;
}
|
dc4d6e7959ed582343d566611e2db27407c5b2ea.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* nbody.cu
*
* N-body example that illustrates gravitational simulation.
* This is the type of computation that GPUs excel at:
* parallelizable, with lots of FLOPS per unit of external
* memory bandwidth required.
*
* Build with: nvcc -I ../chLib <options> nbody.cu nbody_CPU_SSE.cpp nbody_CPU_SSE_threaded.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu
* On Linux: nvcc -I ../chLib <options> nbody.cu nbody_CPU_SSE.cpp nbody_CPU_SSE_threaded.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu -lpthread -lrt
* Requires: No minimum SM requirement. If SM 3.x is not available,
* this application quietly replaces the shuffle and fast-atomic
* implementations with the shared memory implementation.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
// for kbhit()
#include <ch_conio.h>
#include <math.h>
#include <chCommandLine.h>
#include <chError.h>
#include <chThread.h>
#include <chTimer.h>
#include "nbody.h"
#include "bodybodyInteraction.cuh"
using namespace cudahandbook::threading;
inline void
randomVector( float v[3] )
{
float lenSqr;
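// Rejection sampling: draw from the cube [-1,1]^3 and retry until the point
// falls inside the unit sphere.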
do {
v[0] = rand() / (float) RAND_MAX * 2 - 1;
v[1] = rand() / (float) RAND_MAX * 2 - 1;
v[2] = rand() / (float) RAND_MAX * 2 - 1;
lenSqr = v[0]*v[0]+v[1]*v[1]+v[2]*v[2];
} while ( lenSqr > 1.0f );
}
void
randomUnitBodies( float *pos, float *vel, size_t N )
{
for ( size_t i = 0; i < N; i++ ) {
randomVector( &pos[4*i] );
randomVector( &vel[4*i] );
pos[4*i+3] = 1.0f; // unit mass
vel[4*i+3] = 1.0f;
}
}
template<typename T>
static float
relError( float a, float b )
{
if ( a == b ) return 0.0f;
return fabsf(a-b)/b;
}
bool g_bCUDAPresent;
bool g_bSM30Present;
float *g_hostAOS_PosMass;
float *g_hostAOS_VelInvMass;
float *g_hostAOS_Force;
float *g_dptrAOS_PosMass;
float *g_dptrAOS_Force;
// Buffer to hold the golden version of the forces, used for comparison
// Along with timing results, we report the maximum relative error with
// respect to this array.
float *g_hostAOS_Force_Golden;
float *g_hostSOA_Pos[3];
float *g_hostSOA_Force[3];
float *g_hostSOA_Mass;
float *g_hostSOA_InvMass;
size_t g_N;
float g_softening = 0.1f;
float g_damping = 0.995f;
float g_dt = 0.016f;
template<typename T>
static T
relError( T a, T b )
{
if ( a == b ) return 0.0f;
T relErr = (a-b)/b;
// Manually take absolute value
return (relErr<0.0f) ? -relErr : relErr;
}
#include "nbody_CPU_AOS.h"
#include "nbody_CPU_AOS_tiled.h"
#include "nbody_CPU_SOA.h"
#include "nbody_CPU_SIMD.h"
#ifndef NO_CUDA
#include "nbody_GPU_AOS.cuh"
#include "nbody_GPU_AOS_const.cuh"
#include "nbody_GPU_AOS_tiled.cuh"
//#include "nbody_GPU_SOA_tiled.cuh"
#include "nbody_GPU_Shuffle.cuh"
#include "nbody_GPU_Atomic_hip.cuh"
#endif
void
integrateGravitation_AOS( float *ppos, float *pvel, float *pforce, float dt, float damping, size_t N )
{
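// Semi-implicit Euler step: each velocity is advanced (and damped) first,
// then the position update uses the new velocity.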
for ( size_t i = 0; i < N; i++ ) {
int index = 4*i;
int indexForce = 3*i;
float pos[3], vel[3], force[3];
pos[0] = ppos[index+0];
pos[1] = ppos[index+1];
pos[2] = ppos[index+2];
float invMass = pvel[index+3];
vel[0] = pvel[index+0];
vel[1] = pvel[index+1];
vel[2] = pvel[index+2];
force[0] = pforce[indexForce+0];
force[1] = pforce[indexForce+1];
force[2] = pforce[indexForce+2];
// acceleration = force / mass;
// new velocity = old velocity + acceleration * deltaTime
vel[0] += (force[0] * invMass) * dt;
vel[1] += (force[1] * invMass) * dt;
vel[2] += (force[2] * invMass) * dt;
vel[0] *= damping;
vel[1] *= damping;
vel[2] *= damping;
// new position = old position + velocity * deltaTime
pos[0] += vel[0] * dt;
pos[1] += vel[1] * dt;
pos[2] += vel[2] * dt;
ppos[index+0] = pos[0];
ppos[index+1] = pos[1];
ppos[index+2] = pos[2];
pvel[index+0] = vel[0];
pvel[index+1] = vel[1];
pvel[index+2] = vel[2];
}
}
enum nbodyAlgorithm_enum g_Algorithm;
//
// g_maxAlgorithm is used to determine when to rotate g_Algorithm back to CPU_AOS
// If CUDA is present, it is CPU_SIMD_threaded, otherwise it depends on SM version
//
// The shuffle and tiled implementations are SM 3.0 only.
//
// The CPU and GPU algorithms must be contiguous, and the logic in main() to
// initialize this value must be modified if any new algorithms are added.
//
enum nbodyAlgorithm_enum g_maxAlgorithm;
bool g_bCrossCheck = true;
bool g_bUseSIMDForCrossCheck = true;
bool g_bNoCPU = false;
bool
ComputeGravitation(
float *ms,
float *maxRelError,
nbodyAlgorithm_enum algorithm,
bool bCrossCheck )
{
hipError_t status;
bool bSOA = false;
// AOS -> SOA data structures in case we are measuring SOA performance
for ( size_t i = 0; i < g_N; i++ ) {
g_hostSOA_Pos[0][i] = g_hostAOS_PosMass[4*i+0];
g_hostSOA_Pos[1][i] = g_hostAOS_PosMass[4*i+1];
g_hostSOA_Pos[2][i] = g_hostAOS_PosMass[4*i+2];
g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3];
g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i];
}
if ( bCrossCheck ) {
#ifdef HAVE_SIMD_THREADED
if ( g_bUseSIMDForCrossCheck ) {
ComputeGravitation_SIMD_threaded(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
for ( size_t i = 0; i < g_N; i++ ) {
g_hostAOS_Force_Golden[3*i+0] = g_hostSOA_Force[0][i];
g_hostAOS_Force_Golden[3*i+1] = g_hostSOA_Force[1][i];
g_hostAOS_Force_Golden[3*i+2] = g_hostSOA_Force[2][i];
}
}
else {
#endif
ComputeGravitation_AOS(
g_hostAOS_Force_Golden,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
#ifdef HAVE_SIMD_THREADED
}
#endif
}
// CPU->GPU copies in case we are measuring GPU performance
if ( g_bCUDAPresent ) {
CUDART_CHECK( hipMemcpyAsync( g_dptrAOS_PosMass, g_hostAOS_PosMass, 4*g_N*sizeof(float), hipMemcpyHostToDevice ) );
}
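// Dispatch to the selected implementation; the SOA/SIMD paths set bSOA so the
// forces are gathered back into the AOS array after the call.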
switch ( algorithm ) {
case CPU_AOS:
*ms = ComputeGravitation_AOS(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case CPU_AOS_tiled:
*ms = ComputeGravitation_AOS_tiled(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case CPU_SOA:
*ms = ComputeGravitation_SOA(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#ifdef HAVE_SIMD
case CPU_SIMD:
*ms = ComputeGravitation_SIMD(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifdef HAVE_SIMD_THREADED
case CPU_SIMD_threaded:
*ms = ComputeGravitation_SIMD_threaded(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifdef HAVE_SIMD_OPENMP
case CPU_SIMD_openmp:
*ms = ComputeGravitation_SIMD_openmp(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifndef NO_CUDA
case GPU_AOS:
*ms = ComputeGravitation_GPU_AOS(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
case GPU_AOS_tiled:
*ms = ComputeGravitation_GPU_AOS_tiled(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
#if 0
// commented out - too slow even on SM 3.0
case GPU_Atomic:
CUDART_CHECK( hipMemset( g_dptrAOS_Force, 0, 3*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Atomic(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
#endif
case GPU_Shared:
CUDART_CHECK( hipMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Shared(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
case GPU_Const:
CUDART_CHECK( hipMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeNBodyGravitation_GPU_AOS_const(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
case GPU_Shuffle:
CUDART_CHECK( hipMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Shuffle(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( hipMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) );
break;
case multiGPU_SingleCPUThread:
memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) );
*ms = ComputeGravitation_multiGPU_singlethread(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case multiGPU_MultiCPUThread:
memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) );
*ms = ComputeGravitation_multiGPU_threaded(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
#endif
default:
fprintf(stderr, "Unrecognized algorithm index: %d\n", algorithm);
abort();
break;
}
// SOA -> AOS
if ( bSOA ) {
for ( size_t i = 0; i < g_N; i++ ) {
g_hostAOS_Force[3*i+0] = g_hostSOA_Force[0][i];
g_hostAOS_Force[3*i+1] = g_hostSOA_Force[1][i];
g_hostAOS_Force[3*i+2] = g_hostSOA_Force[2][i];
}
}
*maxRelError = 0.0f;
if ( bCrossCheck ) {
float max = 0.0f;
for ( size_t i = 0; i < 3*g_N; i++ ) {
float err = relError( g_hostAOS_Force[i], g_hostAOS_Force_Golden[i] );
if ( err > max ) {
max = err;
}
}
*maxRelError = max;
}
integrateGravitation_AOS(
g_hostAOS_PosMass,
g_hostAOS_VelInvMass,
g_hostAOS_Force,
g_dt,
g_damping,
g_N );
return true;
Error:
return false;
}
workerThread *g_CPUThreadPool;
int g_numCPUCores;
workerThread *g_GPUThreadPool;
int g_numGPUs;
struct gpuInit_struct
{
int iGPU;
hipError_t status;
};
void
initializeGPU( void *_p )
{
hipError_t status;
gpuInit_struct *p = (gpuInit_struct *) _p;
CUDART_CHECK( hipSetDevice( p->iGPU ) );
CUDART_CHECK( hipSetDeviceFlags( hipDeviceMapHost ) );
CUDART_CHECK( hipFree(0) );
Error:
p->status = status;
}
int
main( int argc, char *argv[] )
{
hipError_t status;
// kiloparticles
int kParticles = 4, kMaxIterations = 0;
if ( 1 == argc ) {
printf( "Usage: nbody --numbodies <N> [--nocpu] [--nocrosscheck] [--iterations <N>]\n" );
printf( " --numbodies is multiplied by 1024 (default is 4)\n" );
printf( " By default, the app checks results against a CPU implementation; \n" );
printf( " disable this behavior with --nocrosscheck.\n" );
printf( " The CPU implementation may be disabled with --nocpu.\n" );
printf( " --nocpu implies --nocrosscheck.\n\n" );
printf( " --nosimd uses serial CPU implementation instead of SIMD.\n" );
printf( " --iterations specifies a fixed number of iterations to execute\n");
return 1;
}
// for reproducible results for a given N
srand(7);
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
status = hipGetDeviceCount( &g_numGPUs );
g_bCUDAPresent = (hipSuccess == status) && (g_numGPUs > 0);
if ( g_bCUDAPresent ) {
hipDeviceProp_t prop;
CUDART_CHECK( hipGetDeviceProperties( &prop, 0 ) );
g_bSM30Present = prop.major >= 3;
}
g_bNoCPU = chCommandLineGetBool( "nocpu", argc, argv );
if ( g_bNoCPU && ! g_bCUDAPresent ) {
printf( "--nocpu specified, but no CUDA present...exiting\n" );
exit(1);
}
if ( g_numGPUs ) {
chCommandLineGet( &g_numGPUs, "numgpus", argc, argv );
g_GPUThreadPool = new workerThread[g_numGPUs];
for ( size_t i = 0; i < g_numGPUs; i++ ) {
if ( ! g_GPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
for ( int i = 0; i < g_numGPUs; i++ ) {
gpuInit_struct initGPU = {i};
g_GPUThreadPool[i].delegateSynchronous(
initializeGPU,
&initGPU );
if ( hipSuccess != initGPU.status ) {
fprintf( stderr, "Initializing GPU %d failed "
" with %d (%s)\n",
i,
initGPU.status,
hipGetErrorString( initGPU.status ) );
return 1;
}
}
}
g_bCrossCheck = ! chCommandLineGetBool( "nocrosscheck", argc, argv );
if ( g_bNoCPU ) {
g_bCrossCheck = false;
}
if ( g_bCrossCheck && chCommandLineGetBool( "nosse", argc, argv ) ) {
g_bUseSIMDForCrossCheck = false;
}
chCommandLineGet( &kParticles, "numbodies", argc, argv );
g_N = kParticles*1024;
chCommandLineGet( &kMaxIterations, "iterations", argc, argv);
// Round down to the nearest multiple of the CPU count (e.g. if we have
// a system with a CPU count that isn't a power of two, we need to round)
g_N -= g_N % g_numCPUCores;
printf( "Running simulation with %d particles, crosscheck %s, CPU %s\n", (int) g_N,
g_bCrossCheck ? "enabled" : "disabled",
g_bNoCPU ? "disabled" : "enabled" );
#if defined(HAVE_SIMD_OPENMP)
g_maxAlgorithm = CPU_SIMD_openmp;
#elif defined(HAVE_SIMD_THREADED)
g_maxAlgorithm = CPU_SIMD_threaded;
#elif defined(HAVE_SIMD)
g_maxAlgorithm = CPU_SIMD;
#else
g_maxAlgorithm = CPU_SOA;
#endif
g_Algorithm = g_bCUDAPresent ? GPU_AOS : g_maxAlgorithm;
if ( g_bCUDAPresent || g_bNoCPU ) {
// max algorithm is different depending on whether SM 3.0 is present
g_maxAlgorithm = g_bSM30Present ? GPU_AOS_tiled : multiGPU_MultiCPUThread;
}
if ( g_bCUDAPresent ) {
hipDeviceProp_t propForVersion;
CUDART_CHECK( hipSetDeviceFlags( hipDeviceMapHost ) );
CUDART_CHECK( hipGetDeviceProperties( &propForVersion, 0 ) );
if ( propForVersion.major < 3 ) {
// Only SM 3.x supports shuffle and fast atomics, so we cannot run
// some algorithms on this board.
g_maxAlgorithm = multiGPU_MultiCPUThread;
}
CUDART_CHECK( hipHostMalloc( (void **) &g_hostAOS_PosMass, 4*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
for ( int i = 0; i < 3; i++ ) {
CUDART_CHECK( hipHostMalloc( (void **) &g_hostSOA_Pos[i], g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipHostMalloc( (void **) &g_hostSOA_Force[i], g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
}
CUDART_CHECK( hipHostMalloc( (void **) &g_hostAOS_Force, 3*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipHostMalloc( (void **) &g_hostAOS_Force_Golden, 3*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipHostMalloc( (void **) &g_hostAOS_VelInvMass, 4*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipHostMalloc( (void **) &g_hostSOA_Mass, g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipHostMalloc( (void **) &g_hostSOA_InvMass, g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) );
CUDART_CHECK( hipMalloc( &g_dptrAOS_PosMass, 4*g_N*sizeof(float) ) );
CUDART_CHECK( hipMalloc( (void **) &g_dptrAOS_Force, 3*g_N*sizeof(float) ) );
}
else {
g_hostAOS_PosMass = new float[4*g_N];
for ( int i = 0; i < 3; i++ ) {
g_hostSOA_Pos[i] = new float[g_N];
g_hostSOA_Force[i] = new float[g_N];
}
g_hostSOA_Mass = new float[g_N];
g_hostAOS_Force = new float[3*g_N];
g_hostAOS_Force_Golden = new float[3*g_N];
g_hostAOS_VelInvMass = new float[4*g_N];
g_hostSOA_Mass = new float[g_N];
g_hostSOA_InvMass = new float[g_N];
}
randomUnitBodies( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_N );
for ( size_t i = 0; i < g_N; i++ ) {
g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3];
g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i];
}
#if 0
// gather performance data over GPU implementations
// for different problem sizes.
printf( "kBodies\t" );
for ( int algorithm = GPU_AOS;
algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]);
algorithm++ ) {
printf( "%s\t", rgszAlgorithmNames[algorithm] );
}
printf( "\n" );
for ( int kBodies = 3; kBodies <= 96; kBodies += 3 ) {
g_N = 1024*kBodies;
printf( "%d\t", kBodies );
for ( int algorithm = GPU_AOS;
algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]);
algorithm++ ) {
float sum = 0.0f;
const int numIterations = 10;
for ( int i = 0; i < numIterations; i++ ) {
float ms, err;
if ( ! ComputeGravitation( &ms, &err, (nbodyAlgorithm_enum) algorithm, g_bCrossCheck ) ) {
fprintf( stderr, "Error computing timestep\n" );
exit(1);
}
sum += ms;
}
sum /= (float) numIterations;
double interactionsPerSecond = (double) g_N*g_N*1000.0f / sum;
if ( interactionsPerSecond > 1e9 ) {
printf ( "%.2f\t", interactionsPerSecond/1e9 );
}
else {
printf ( "%.3f\t", interactionsPerSecond/1e9 );
}
}
printf( "\n" );
}
return 0;
#endif
{
int kIterations = 0;
bool bStop = false;
while ( ! bStop ) {
float ms, err;
if ( ! ComputeGravitation( &ms, &err, g_Algorithm, g_bCrossCheck ) ) {
fprintf( stderr, "Error computing timestep\n" );
exit(1);
}
double interactionsPerSecond = (double) g_N*g_N*1000.0f / ms;
if ( interactionsPerSecond > 1e9 ) {
printf ( "\r%s: %8.2f ms = %8.3fx10^9 interactions/s (Rel. error: %E)\n",
rgszAlgorithmNames[g_Algorithm],
ms,
interactionsPerSecond/1e9,
err );
}
else {
printf ( "\r%s: %8.2f ms = %8.3fx10^6 interactions/s (Rel. error: %E)\n",
rgszAlgorithmNames[g_Algorithm],
ms,
interactionsPerSecond/1e6,
err );
}
if (kMaxIterations) {
kIterations++;
if (kIterations >= kMaxIterations) {
bStop = true;
}
}
if ( kbhit() ) {
char c = getch();
switch ( c ) {
case ' ':
if ( g_Algorithm == g_maxAlgorithm ) {
g_Algorithm = g_bNoCPU ? GPU_AOS : CPU_AOS;
// Skip slow CPU implementations if we are using SIMD for cross-check
if ( g_bUseSIMDForCrossCheck ) {
#if defined(HAVE_SIMD_THREADED)
g_Algorithm = CPU_SIMD_threaded;
#elif defined(HAVE_SIMD_OPENMP)
g_Algorithm = CPU_SIMD_openmp;
#endif
}
}
else {
g_Algorithm = (enum nbodyAlgorithm_enum) (g_Algorithm+1);
}
break;
case 'q':
case 'Q':
bStop = true;
break;
}
}
}
}
return 0;
Error:
if ( hipSuccess != status ) {
printf( "CUDA Error: %s\n", hipGetErrorString( status ) );
}
return 1;
}
| dc4d6e7959ed582343d566611e2db27407c5b2ea.cu | /*
*
* nbody.cu
*
* N-body example that illustrates gravitational simulation.
* This is the type of computation that GPUs excel at:
* parallelizable, with lots of FLOPS per unit of external
* memory bandwidth required.
*
* Build with: nvcc -I ../chLib <options> nbody.cu nbody_CPU_SSE.cpp nbody_CPU_SSE_threaded.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu
* On Linux: nvcc -I ../chLib <options> nbody.cu nbody_CPU_SSE.cpp nbody_CPU_SSE_threaded.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu -lpthread -lrt
* Requires: No minimum SM requirement. If SM 3.x is not available,
* this application quietly replaces the shuffle and fast-atomic
* implementations with the shared memory implementation.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
// for kbhit()
#include <ch_conio.h>
#include <math.h>
#include <chCommandLine.h>
#include <chError.h>
#include <chThread.h>
#include <chTimer.h>
#include "nbody.h"
#include "bodybodyInteraction.cuh"
using namespace cudahandbook::threading;
inline void
randomVector( float v[3] )
{
float lenSqr;
do {
v[0] = rand() / (float) RAND_MAX * 2 - 1;
v[1] = rand() / (float) RAND_MAX * 2 - 1;
v[2] = rand() / (float) RAND_MAX * 2 - 1;
lenSqr = v[0]*v[0]+v[1]*v[1]+v[2]*v[2];
} while ( lenSqr > 1.0f );
}
void
randomUnitBodies( float *pos, float *vel, size_t N )
{
for ( size_t i = 0; i < N; i++ ) {
randomVector( &pos[4*i] );
randomVector( &vel[4*i] );
pos[4*i+3] = 1.0f; // unit mass
vel[4*i+3] = 1.0f;
}
}
template<typename T>
static float
relError( float a, float b )
{
if ( a == b ) return 0.0f;
return fabsf(a-b)/b;
}
bool g_bCUDAPresent;
bool g_bSM30Present;
float *g_hostAOS_PosMass;
float *g_hostAOS_VelInvMass;
float *g_hostAOS_Force;
float *g_dptrAOS_PosMass;
float *g_dptrAOS_Force;
// Buffer to hold the golden version of the forces, used for comparison
// Along with timing results, we report the maximum relative error with
// respect to this array.
float *g_hostAOS_Force_Golden;
float *g_hostSOA_Pos[3];
float *g_hostSOA_Force[3];
float *g_hostSOA_Mass;
float *g_hostSOA_InvMass;
size_t g_N;
float g_softening = 0.1f;
float g_damping = 0.995f;
float g_dt = 0.016f;
template<typename T>
static T
relError( T a, T b )
{
if ( a == b ) return 0.0f;
T relErr = (a-b)/b;
// Manually take absolute value
return (relErr<0.0f) ? -relErr : relErr;
}
#include "nbody_CPU_AOS.h"
#include "nbody_CPU_AOS_tiled.h"
#include "nbody_CPU_SOA.h"
#include "nbody_CPU_SIMD.h"
#ifndef NO_CUDA
#include "nbody_GPU_AOS.cuh"
#include "nbody_GPU_AOS_const.cuh"
#include "nbody_GPU_AOS_tiled.cuh"
//#include "nbody_GPU_SOA_tiled.cuh"
#include "nbody_GPU_Shuffle.cuh"
#include "nbody_GPU_Atomic.cuh"
#endif
void
integrateGravitation_AOS( float *ppos, float *pvel, float *pforce, float dt, float damping, size_t N )
{
for ( size_t i = 0; i < N; i++ ) {
int index = 4*i;
int indexForce = 3*i;
float pos[3], vel[3], force[3];
pos[0] = ppos[index+0];
pos[1] = ppos[index+1];
pos[2] = ppos[index+2];
float invMass = pvel[index+3];
vel[0] = pvel[index+0];
vel[1] = pvel[index+1];
vel[2] = pvel[index+2];
force[0] = pforce[indexForce+0];
force[1] = pforce[indexForce+1];
force[2] = pforce[indexForce+2];
// acceleration = force / mass;
// new velocity = old velocity + acceleration * deltaTime
vel[0] += (force[0] * invMass) * dt;
vel[1] += (force[1] * invMass) * dt;
vel[2] += (force[2] * invMass) * dt;
vel[0] *= damping;
vel[1] *= damping;
vel[2] *= damping;
// new position = old position + velocity * deltaTime
pos[0] += vel[0] * dt;
pos[1] += vel[1] * dt;
pos[2] += vel[2] * dt;
ppos[index+0] = pos[0];
ppos[index+1] = pos[1];
ppos[index+2] = pos[2];
pvel[index+0] = vel[0];
pvel[index+1] = vel[1];
pvel[index+2] = vel[2];
}
}
enum nbodyAlgorithm_enum g_Algorithm;
//
// g_maxAlgorithm is used to determine when to rotate g_Algorithm back to CPU_AOS
// If CUDA is not present, it is the fastest available CPU implementation (e.g. CPU_SIMD_threaded);
// otherwise it depends on the SM version
//
// The shuffle and tiled implementations are SM 3.0 only.
//
// The CPU and GPU algorithms must be contiguous, and the logic in main() to
// initialize this value must be modified if any new algorithms are added.
//
enum nbodyAlgorithm_enum g_maxAlgorithm;
bool g_bCrossCheck = true;
bool g_bUseSIMDForCrossCheck = true;
bool g_bNoCPU = false;
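// Computes one timestep using the requested algorithm: converts AOS->SOA as needed,
// optionally cross-checks forces against a golden CPU implementation, reports the
// elapsed time (ms) and maximum relative error, then integrates positions and
// velocities. Returns false if a CUDA call fails.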
bool
ComputeGravitation(
float *ms,
float *maxRelError,
nbodyAlgorithm_enum algorithm,
bool bCrossCheck )
{
cudaError_t status;
bool bSOA = false;
// AOS -> SOA data structures in case we are measuring SOA performance
for ( size_t i = 0; i < g_N; i++ ) {
g_hostSOA_Pos[0][i] = g_hostAOS_PosMass[4*i+0];
g_hostSOA_Pos[1][i] = g_hostAOS_PosMass[4*i+1];
g_hostSOA_Pos[2][i] = g_hostAOS_PosMass[4*i+2];
g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3];
g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i];
}
if ( bCrossCheck ) {
#ifdef HAVE_SIMD_THREADED
if ( g_bUseSIMDForCrossCheck ) {
ComputeGravitation_SIMD_threaded(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
for ( size_t i = 0; i < g_N; i++ ) {
g_hostAOS_Force_Golden[3*i+0] = g_hostSOA_Force[0][i];
g_hostAOS_Force_Golden[3*i+1] = g_hostSOA_Force[1][i];
g_hostAOS_Force_Golden[3*i+2] = g_hostSOA_Force[2][i];
}
}
else {
#endif
ComputeGravitation_AOS(
g_hostAOS_Force_Golden,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
#ifdef HAVE_SIMD_THREADED
}
#endif
}
// CPU->GPU copies in case we are measuring GPU performance
if ( g_bCUDAPresent ) {
CUDART_CHECK( cudaMemcpyAsync( g_dptrAOS_PosMass, g_hostAOS_PosMass, 4*g_N*sizeof(float), cudaMemcpyHostToDevice ) );
}
switch ( algorithm ) {
case CPU_AOS:
*ms = ComputeGravitation_AOS(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case CPU_AOS_tiled:
*ms = ComputeGravitation_AOS_tiled(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case CPU_SOA:
*ms = ComputeGravitation_SOA(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#ifdef HAVE_SIMD
case CPU_SIMD:
*ms = ComputeGravitation_SIMD(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifdef HAVE_SIMD_THREADED
case CPU_SIMD_threaded:
*ms = ComputeGravitation_SIMD_threaded(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifdef HAVE_SIMD_OPENMP
case CPU_SIMD_openmp:
*ms = ComputeGravitation_SIMD_openmp(
g_hostSOA_Force,
g_hostSOA_Pos,
g_hostSOA_Mass,
g_softening*g_softening,
g_N );
bSOA = true;
break;
#endif
#ifndef NO_CUDA
case GPU_AOS:
*ms = ComputeGravitation_GPU_AOS(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
case GPU_AOS_tiled:
*ms = ComputeGravitation_GPU_AOS_tiled(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
#if 0
// commented out - too slow even on SM 3.0
case GPU_Atomic:
CUDART_CHECK( cudaMemset( g_dptrAOS_Force, 0, 3*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Atomic(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
#endif
case GPU_Shared:
CUDART_CHECK( cudaMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Shared(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
case GPU_Const:
CUDART_CHECK( cudaMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeNBodyGravitation_GPU_AOS_const(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
case GPU_Shuffle:
CUDART_CHECK( cudaMemset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) );
*ms = ComputeGravitation_GPU_Shuffle(
g_dptrAOS_Force,
g_dptrAOS_PosMass,
g_softening*g_softening,
g_N );
CUDART_CHECK( cudaMemcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) );
break;
case multiGPU_SingleCPUThread:
memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) );
*ms = ComputeGravitation_multiGPU_singlethread(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
case multiGPU_MultiCPUThread:
memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) );
*ms = ComputeGravitation_multiGPU_threaded(
g_hostAOS_Force,
g_hostAOS_PosMass,
g_softening*g_softening,
g_N );
break;
#endif
default:
fprintf(stderr, "Unrecognized algorithm index: %d\n", algorithm);
abort();
break;
}
// SOA -> AOS
if ( bSOA ) {
for ( size_t i = 0; i < g_N; i++ ) {
g_hostAOS_Force[3*i+0] = g_hostSOA_Force[0][i];
g_hostAOS_Force[3*i+1] = g_hostSOA_Force[1][i];
g_hostAOS_Force[3*i+2] = g_hostSOA_Force[2][i];
}
}
*maxRelError = 0.0f;
if ( bCrossCheck ) {
float max = 0.0f;
for ( size_t i = 0; i < 3*g_N; i++ ) {
float err = relError( g_hostAOS_Force[i], g_hostAOS_Force_Golden[i] );
if ( err > max ) {
max = err;
}
}
*maxRelError = max;
}
integrateGravitation_AOS(
g_hostAOS_PosMass,
g_hostAOS_VelInvMass,
g_hostAOS_Force,
g_dt,
g_damping,
g_N );
return true;
Error:
return false;
}
workerThread *g_CPUThreadPool;
int g_numCPUCores;
workerThread *g_GPUThreadPool;
int g_numGPUs;
struct gpuInit_struct
{
int iGPU;
cudaError_t status;
};
void
initializeGPU( void *_p )
{
cudaError_t status;
gpuInit_struct *p = (gpuInit_struct *) _p;
CUDART_CHECK( cudaSetDevice( p->iGPU ) );
CUDART_CHECK( cudaSetDeviceFlags( cudaDeviceMapHost ) );
CUDART_CHECK( cudaFree(0) );
Error:
p->status = status;
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
// kiloparticles
int kParticles = 4, kMaxIterations = 0;
if ( 1 == argc ) {
printf( "Usage: nbody --numbodies <N> [--nocpu] [--nocrosscheck] [--iterations <N>]\n" );
printf( " --numbodies is multiplied by 1024 (default is 4)\n" );
printf( " By default, the app checks results against a CPU implementation; \n" );
printf( " disable this behavior with --nocrosscheck.\n" );
printf( " The CPU implementation may be disabled with --nocpu.\n" );
printf( " --nocpu implies --nocrosscheck.\n\n" );
printf( " --nosimd uses serial CPU implementation instead of SIMD.\n" );
printf( " --iterations specifies a fixed number of iterations to execute\n");
return 1;
}
// for reproducible results for a given N
srand(7);
{
g_numCPUCores = processorCount();
g_CPUThreadPool = new workerThread[g_numCPUCores];
for ( size_t i = 0; i < g_numCPUCores; i++ ) {
if ( ! g_CPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
}
status = cudaGetDeviceCount( &g_numGPUs );
g_bCUDAPresent = (cudaSuccess == status) && (g_numGPUs > 0);
if ( g_bCUDAPresent ) {
cudaDeviceProp prop;
CUDART_CHECK( cudaGetDeviceProperties( &prop, 0 ) );
g_bSM30Present = prop.major >= 3;
}
g_bNoCPU = chCommandLineGetBool( "nocpu", argc, argv );
if ( g_bNoCPU && ! g_bCUDAPresent ) {
printf( "--nocpu specified, but no CUDA present...exiting\n" );
exit(1);
}
if ( g_numGPUs ) {
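// optionally override the number of GPUs to use with --numgpus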
chCommandLineGet( &g_numGPUs, "numgpus", argc, argv );
g_GPUThreadPool = new workerThread[g_numGPUs];
for ( size_t i = 0; i < g_numGPUs; i++ ) {
if ( ! g_GPUThreadPool[i].initialize( ) ) {
fprintf( stderr, "Error initializing thread pool\n" );
return 1;
}
}
for ( int i = 0; i < g_numGPUs; i++ ) {
gpuInit_struct initGPU = {i};
g_GPUThreadPool[i].delegateSynchronous(
initializeGPU,
&initGPU );
if ( cudaSuccess != initGPU.status ) {
fprintf( stderr, "Initializing GPU %d failed "
" with %d (%s)\n",
i,
initGPU.status,
cudaGetErrorString( initGPU.status ) );
return 1;
}
}
}
g_bCrossCheck = ! chCommandLineGetBool( "nocrosscheck", argc, argv );
if ( g_bNoCPU ) {
g_bCrossCheck = false;
}
if ( g_bCrossCheck && chCommandLineGetBool( "nosse", argc, argv ) ) {
g_bUseSIMDForCrossCheck = false;
}
chCommandLineGet( &kParticles, "numbodies", argc, argv );
g_N = kParticles*1024;
chCommandLineGet( &kMaxIterations, "iterations", argc, argv);
// Round down to the nearest multiple of the CPU count (e.g. if we have
// a system with a CPU count that isn't a power of two, we need to round)
g_N -= g_N % g_numCPUCores;
printf( "Running simulation with %d particles, crosscheck %s, CPU %s\n", (int) g_N,
g_bCrossCheck ? "enabled" : "disabled",
g_bNoCPU ? "disabled" : "enabled" );
#if defined(HAVE_SIMD_OPENMP)
g_maxAlgorithm = CPU_SIMD_openmp;
#elif defined(HAVE_SIMD_THREADED)
g_maxAlgorithm = CPU_SIMD_threaded;
#elif defined(HAVE_SIMD)
g_maxAlgorithm = CPU_SIMD;
#else
g_maxAlgorithm = CPU_SOA;
#endif
g_Algorithm = g_bCUDAPresent ? GPU_AOS : g_maxAlgorithm;
if ( g_bCUDAPresent || g_bNoCPU ) {
// max algorithm is different depending on whether SM 3.0 is present
g_maxAlgorithm = g_bSM30Present ? GPU_AOS_tiled : multiGPU_MultiCPUThread;
}
if ( g_bCUDAPresent ) {
cudaDeviceProp propForVersion;
CUDART_CHECK( cudaSetDeviceFlags( cudaDeviceMapHost ) );
CUDART_CHECK( cudaGetDeviceProperties( &propForVersion, 0 ) );
if ( propForVersion.major < 3 ) {
// Only SM 3.x supports shuffle and fast atomics, so we cannot run
// some algorithms on this board.
g_maxAlgorithm = multiGPU_MultiCPUThread;
}
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostAOS_PosMass, 4*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
for ( int i = 0; i < 3; i++ ) {
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostSOA_Pos[i], g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostSOA_Force[i], g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
}
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostAOS_Force, 3*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostAOS_Force_Golden, 3*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostAOS_VelInvMass, 4*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostSOA_Mass, g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaHostAlloc( (void **) &g_hostSOA_InvMass, g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) );
CUDART_CHECK( cudaMalloc( &g_dptrAOS_PosMass, 4*g_N*sizeof(float) ) );
CUDART_CHECK( cudaMalloc( (void **) &g_dptrAOS_Force, 3*g_N*sizeof(float) ) );
}
else {
g_hostAOS_PosMass = new float[4*g_N];
for ( int i = 0; i < 3; i++ ) {
g_hostSOA_Pos[i] = new float[g_N];
g_hostSOA_Force[i] = new float[g_N];
}
g_hostSOA_Mass = new float[g_N];
g_hostAOS_Force = new float[3*g_N];
g_hostAOS_Force_Golden = new float[3*g_N];
g_hostAOS_VelInvMass = new float[4*g_N];
g_hostSOA_InvMass = new float[g_N];
}
randomUnitBodies( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_N );
for ( size_t i = 0; i < g_N; i++ ) {
g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3];
g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i];
}
#if 0
// gather performance data over GPU implementations
// for different problem sizes.
printf( "kBodies\t" );
for ( int algorithm = GPU_AOS;
algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]);
algorithm++ ) {
printf( "%s\t", rgszAlgorithmNames[algorithm] );
}
printf( "\n" );
for ( int kBodies = 3; kBodies <= 96; kBodies += 3 ) {
g_N = 1024*kBodies;
printf( "%d\t", kBodies );
for ( int algorithm = GPU_AOS;
algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]);
algorithm++ ) {
float sum = 0.0f;
const int numIterations = 10;
for ( int i = 0; i < numIterations; i++ ) {
float ms, err;
if ( ! ComputeGravitation( &ms, &err, (nbodyAlgorithm_enum) algorithm, g_bCrossCheck ) ) {
fprintf( stderr, "Error computing timestep\n" );
exit(1);
}
sum += ms;
}
sum /= (float) numIterations;
double interactionsPerSecond = (double) g_N*g_N*1000.0f / sum;
if ( interactionsPerSecond > 1e9 ) {
printf ( "%.2f\t", interactionsPerSecond/1e9 );
}
else {
printf ( "%.3f\t", interactionsPerSecond/1e9 );
}
}
printf( "\n" );
}
return 0;
#endif
{
int kIterations = 0;
bool bStop = false;
while ( ! bStop ) {
float ms, err;
if ( ! ComputeGravitation( &ms, &err, g_Algorithm, g_bCrossCheck ) ) {
fprintf( stderr, "Error computing timestep\n" );
exit(1);
}
double interactionsPerSecond = (double) g_N*g_N*1000.0f / ms;
if ( interactionsPerSecond > 1e9 ) {
printf ( "\r%s: %8.2f ms = %8.3fx10^9 interactions/s (Rel. error: %E)\n",
rgszAlgorithmNames[g_Algorithm],
ms,
interactionsPerSecond/1e9,
err );
}
else {
printf ( "\r%s: %8.2f ms = %8.3fx10^6 interactions/s (Rel. error: %E)\n",
rgszAlgorithmNames[g_Algorithm],
ms,
interactionsPerSecond/1e6,
err );
}
if (kMaxIterations) {
kIterations++;
if (kIterations >= kMaxIterations) {
bStop = true;
}
}
if ( kbhit() ) {
char c = getch();
switch ( c ) {
case ' ':
if ( g_Algorithm == g_maxAlgorithm ) {
g_Algorithm = g_bNoCPU ? GPU_AOS : CPU_AOS;
// Skip slow CPU implementations if we are using SIMD for cross-check
if ( g_bUseSIMDForCrossCheck ) {
#if defined(HAVE_SIMD_THREADED)
g_Algorithm = CPU_SIMD_threaded;
#elif defined(HAVE_SIMD_OPENMP)
g_Algorithm = CPU_SIMD_openmp;
#endif
}
}
else {
g_Algorithm = (enum nbodyAlgorithm_enum) (g_Algorithm+1);
}
break;
case 'q':
case 'Q':
bStop = true;
break;
}
}
}
}
return 0;
Error:
if ( cudaSuccess != status ) {
printf( "CUDA Error: %s\n", cudaGetErrorString( status ) );
}
return 1;
}
|
488409f204da12523fb6ac9e44f3be509267c53a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_GATHER_LAYER_INSTANTIATE
#include "lbann/layers/transform/gather.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#if defined(LBANN_HAS_DISTCONV) && defined(LBANN_HAS_NVSHMEM)
#include "lbann/utils/nvshmem.hpp"
#endif
namespace lbann {
namespace {
using Dim2 = gpu_lib::array<size_t, 2>;
using Dim3 = gpu_lib::array<size_t, 3>;
/** @brief Kernel for scattering a 3D tensor
*
* output(k,j,indices(k,i)) = values(k,j,i) (axis == 1)
* output(k,indices(k,j),i) = values(k,j,i) (axis == 0)
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (num_columns_input_mat / bdimx) x (num_rows / bdimy) x
* mb_size /bdimz
*/
template <typename T, bool has_row_vectors>
__global__ void scatter3d_kernel(const T* __restrict__ indices,
Dim2 indices_strides,
const T* __restrict__ values,
Dim3 values_dims,
Dim3 values_strides,
T* __restrict__ output,
Dim3 output_dims,
Dim3 output_strides)
{
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
const size_t nthreadsx = gridDim.x * blockDim.x;
const size_t nthreadsy = gridDim.y * blockDim.y;
const size_t nthreadsz = gridDim.z * blockDim.z;
auto mini_batch_size = values_dims[0];
auto num_rows = values_dims[1];
auto num_value_columns = values_dims[2];
auto bounds = has_row_vectors ? output_dims[1] : output_dims[2];
for (size_t batch = gidz; batch < mini_batch_size; batch += nthreadsz) {
for (size_t row = gidy; row < num_rows; row += nthreadsy) {
for (size_t i = gidx; i < num_value_columns; i += nthreadsx) {
const auto& axis = has_row_vectors ? row : i;
const auto& index_offest = axis * indices_strides[1];
const auto ind = static_cast<El::Int>(
gpu_lib::floor(indices[batch * indices_strides[0] + index_offest]));
if (0 <= ind && ind < static_cast<El::Int>(bounds)) {
const auto& output_axis_1 =
has_row_vectors ? ind : static_cast<El::Int>(row);
const auto& output_axis_2 =
has_row_vectors ? static_cast<El::Int>(i) : ind;
const auto& output_offset = output_axis_1 * output_strides[1] +
output_axis_2 * output_strides[2];
const auto& x =
values[batch * values_strides[0] + row * values_strides[1] +
i * values_strides[2]];
auto& y = output[batch * output_strides[0] + output_offset];
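// accumulate atomically: several indices may map to the same output element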
gpu_lib::atomic_add(&y, x);
}
}
}
}
}
/** @brief Kernel for gathering a 3D tensor
*
* output(k, j, i) = values(k, indices(k,j), i) axis == 0
* output(k, j, i) = values(k, j, indices(k,i)) axis == 1
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (num_columns_output_mat / bdimx) x (num_rows / bdimy) x
* mb_size /bdimz
*/
template <typename T, bool has_row_vectors>
__global__ void gather3d_kernel(const T* __restrict__ indices,
Dim2 indices_strides,
const T* __restrict__ values,
Dim3 values_dims,
Dim3 values_strides,
T* __restrict__ output,
Dim3 output_dims,
Dim3 output_strides)
{
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
const size_t nthreadsx = gridDim.x * blockDim.x;
const size_t nthreadsy = gridDim.y * blockDim.y;
const size_t nthreadsz = gridDim.z * blockDim.z;
auto mini_batch_size = output_dims[0];
auto num_rows = output_dims[1];
auto num_out_columns = output_dims[2];
// If gathering along dim 0, the bounds are the number of rows; otherwise
// the bounds are the number of columns
auto bounds = has_row_vectors ? values_dims[1] : values_dims[2];
for (size_t batch = gidz; batch < mini_batch_size; batch += nthreadsz) {
for (size_t row = gidy; row < num_rows; row += nthreadsy) {
for (size_t i = gidx; i < num_out_columns; i += nthreadsx) {
// If gathering along dim 0, len(ind) == num_rows
const auto& axis = has_row_vectors ? row : i;
const auto& index_offest = axis * indices_strides[1];
const auto ind = static_cast<El::Int>(
gpu_lib::floor(indices[batch * indices_strides[0] + index_offest]));
auto& y = output[batch * output_strides[0] + row * output_strides[1] +
i * output_strides[2]];
const auto& output_axis_1 =
has_row_vectors ? ind : static_cast<El::Int>(row);
const auto& output_axis_2 =
has_row_vectors ? static_cast<El::Int>(i) : ind;
const auto& values_offset =
output_axis_1 * values_strides[1] + output_axis_2 * values_strides[2];
if (0 <= ind && ind < static_cast<El::Int>(bounds)) {
y = values[batch * values_strides[0] + values_offset];
}
else {
y = T{0.f};
}
}
}
}
}
} // namespace
// =============================================================
// Gather member functions
// =============================================================
template <typename TensorDataType, data_layout Layout, El::Device Device>
void gather_layer<TensorDataType, Layout, Device>::fp_compute()
{
#if defined LBANN_HAS_DISTCONV && defined LBANN_HAS_NVSHMEM
// Initialize the nvshmem here. No Op if already initialized
nvshmem::initialize();
if (this->distconv_enabled()) {
this->get_distconv_adapter().fp_compute();
return;
}
#endif // LBANN_HAS_DISTCONV && LBANN_HAS_NVSHMEM
// Local matrices
const auto& local_values = this->get_local_prev_activations(0);
const auto& local_indices = this->get_local_prev_activations(1);
auto& local_output = this->get_local_activations();
const size_t local_mini_batch_size = local_indices.Width();
const auto& input_dims_ = this->get_input_dims();
const auto& output_dims_ = this->get_output_dims();
std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());
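// The input is either a flat vector (1D) or a num_rows x values_size matrix;
// gathering along axis 0 selects whole rows, gathering along axis 1 selects columns.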
const bool is_2D = input_dims.size() > 1;
const bool has_row_vectors = (is_2D && m_gather_axis == 0);
const size_t values_size = is_2D ? input_dims[1] : this->get_input_size(0);
const size_t output_size =
is_2D ? this->get_output_dims()[1] : this->get_output_size();
const size_t num_rows = (input_dims.size() > 1) ? input_dims[0] : 1;
const size_t num_output_rows =
has_row_vectors ? this->get_output_dims()[0] : num_rows;
const size_t value_stride_2 = is_2D ? values_size : 0;
const size_t output_stride_2 = is_2D ? output_size : 0;
// Gather into output tensor
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_values),
gpu::get_sync_info(local_indices));
constexpr size_t block_size_x = 32;
constexpr size_t block_size_y = 8;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
block_dims.z = 1;
grid_dims.x = (values_size + block_dims.x - 1) / block_dims.x;
grid_dims.y = (num_rows + block_dims.y - 1) / block_dims.y;
grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
gpu_lib::clip_grid_dims(grid_dims);
if (has_row_vectors) {
hydrogen::gpu::LaunchKernel(
gather3d_kernel<TensorDataType, true>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_values.LockedBuffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
local_output.Buffer(),
Dim3{local_mini_batch_size, num_output_rows, values_size},
Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
}
else {
hydrogen::gpu::LaunchKernel(
gather3d_kernel<TensorDataType, false>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_values.LockedBuffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
local_output.Buffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
}
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void gather_layer<TensorDataType, Layout, Device>::bp_compute()
{
#if defined LBANN_HAS_DISTCONV && defined LBANN_HAS_NVSHMEM
// Initialize the nvshmem here. No Op if already initialized
nvshmem::initialize();
if (this->distconv_enabled()) {
this->get_distconv_adapter().bp_compute();
return;
}
#endif // LBANN_HAS_DISTCONV && LBANN_HAS_NVSHMEM
// Local matrices
const auto& local_indices = this->get_local_prev_activations(1);
const auto& local_output_grad = this->get_local_prev_error_signals();
auto& local_values_grad = this->get_local_error_signals(0);
auto& local_indices_grad = this->get_local_error_signals(1);
const auto& input_dims_ = this->get_input_dims();
const auto& output_dims_ = this->get_output_dims();
std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());
const size_t local_mini_batch_size = local_indices.Width();
const bool is_2D = input_dims.size() > 1;
const bool has_row_vectors = (is_2D && m_gather_axis == 0);
const size_t values_size = is_2D ? input_dims[1] : this->get_input_size(0);
const size_t output_size =
is_2D ? this->get_output_dims()[1] : this->get_output_size();
const size_t num_rows = (input_dims.size() > 1) ? input_dims[0] : 1;
const size_t num_output_rows =
has_row_vectors ? this->get_output_dims()[0] : num_rows;
const size_t value_stride_2 = (input_dims.size() > 1) ? values_size : 0;
const size_t output_stride_2 = (input_dims.size() > 1) ? output_size : 0;
// Zero out gradient w.r.t. indices
El::Zero(local_indices_grad);
// Scatter into output matrix
El::Zero(local_values_grad);
if (!local_output_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_values_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_indices));
constexpr size_t block_size_x = 32;
constexpr size_t block_size_y = 8;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
block_dims.z = 1;
grid_dims.x = (output_size + block_dims.x - 1) / block_dims.x;
grid_dims.y = (num_rows + block_dims.y - 1) / block_dims.y;
grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
gpu_lib::clip_grid_dims(grid_dims);
if (has_row_vectors) {
hydrogen::gpu::LaunchKernel(
scatter3d_kernel<TensorDataType, true>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_output_grad.LockedBuffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
local_values_grad.Buffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
}
else {
hydrogen::gpu::LaunchKernel(
scatter3d_kernel<TensorDataType, false>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_output_grad.LockedBuffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
local_values_grad.Buffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
}
}
}
#define PROTO(T) \
template class gather_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 488409f204da12523fb6ac9e44f3be509267c53a.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_GATHER_LAYER_INSTANTIATE
#include "lbann/layers/transform/gather.hpp"
#include "lbann/utils/gpu/helpers.hpp"
#if defined(LBANN_HAS_DISTCONV) && defined(LBANN_HAS_NVSHMEM)
#include "lbann/utils/nvshmem.hpp"
#endif
namespace lbann {
namespace {
using Dim2 = gpu_lib::array<size_t, 2>;
using Dim3 = gpu_lib::array<size_t, 3>;
/** @brief Kernel for scattering a 3D tensor
*
* output(k,j,indices(k,i)) = values(k,j,i) (axis == 1)
* output(k,indices(k,j),i) = values(k,j,i) (axis == 0)
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (num_columns_input_mat / bdimx) x (num_rows / bdimy) x
* mb_size /bdimz
*/
template <typename T, bool has_row_vectors>
__global__ void scatter3d_kernel(const T* __restrict__ indices,
Dim2 indices_strides,
const T* __restrict__ values,
Dim3 values_dims,
Dim3 values_strides,
T* __restrict__ output,
Dim3 output_dims,
Dim3 output_strides)
{
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
const size_t nthreadsx = gridDim.x * blockDim.x;
const size_t nthreadsy = gridDim.y * blockDim.y;
const size_t nthreadsz = gridDim.z * blockDim.z;
auto mini_batch_size = values_dims[0];
auto num_rows = values_dims[1];
auto num_value_columns = values_dims[2];
auto bounds = has_row_vectors ? output_dims[1] : output_dims[2];
for (size_t batch = gidz; batch < mini_batch_size; batch += nthreadsz) {
for (size_t row = gidy; row < num_rows; row += nthreadsy) {
for (size_t i = gidx; i < num_value_columns; i += nthreadsx) {
const auto& axis = has_row_vectors ? row : i;
const auto& index_offest = axis * indices_strides[1];
const auto ind = static_cast<El::Int>(
gpu_lib::floor(indices[batch * indices_strides[0] + index_offest]));
if (0 <= ind && ind < static_cast<El::Int>(bounds)) {
const auto& output_axis_1 =
has_row_vectors ? ind : static_cast<El::Int>(row);
const auto& output_axis_2 =
has_row_vectors ? static_cast<El::Int>(i) : ind;
const auto& output_offset = output_axis_1 * output_strides[1] +
output_axis_2 * output_strides[2];
const auto& x =
values[batch * values_strides[0] + row * values_strides[1] +
i * values_strides[2]];
auto& y = output[batch * output_strides[0] + output_offset];
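// accumulate atomically: several indices may map to the same output element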
gpu_lib::atomic_add(&y, x);
}
}
}
}
}
/** @brief Kernel for gathering a 3D tensor
*
* output(k, j, i) = values(k, indices(k,j), i) axis == 0
* output(k, j, i) = values(k, j, indices(k,i)) axis == 1
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (num_columns_output_mat / bdimx) x (num_rows / bdimy) x
* mb_size /bdimz
*/
template <typename T, bool has_row_vectors>
__global__ void gather3d_kernel(const T* __restrict__ indices,
Dim2 indices_strides,
const T* __restrict__ values,
Dim3 values_dims,
Dim3 values_strides,
T* __restrict__ output,
Dim3 output_dims,
Dim3 output_strides)
{
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
const size_t nthreadsx = gridDim.x * blockDim.x;
const size_t nthreadsy = gridDim.y * blockDim.y;
const size_t nthreadsz = gridDim.z * blockDim.z;
auto mini_batch_size = output_dims[0];
auto num_rows = output_dims[1];
auto num_out_columns = output_dims[2];
// If gathering along dim 0, the bounds are the number of rows; otherwise
// the bounds are the number of columns
auto bounds = has_row_vectors ? values_dims[1] : values_dims[2];
for (size_t batch = gidz; batch < mini_batch_size; batch += nthreadsz) {
for (size_t row = gidy; row < num_rows; row += nthreadsy) {
for (size_t i = gidx; i < num_out_columns; i += nthreadsx) {
// If gatherin along dim 0, the len(ind) == num_rows
const auto& axis = has_row_vectors ? row : i;
const auto& index_offest = axis * indices_strides[1];
const auto ind = static_cast<El::Int>(
gpu_lib::floor(indices[batch * indices_strides[0] + index_offest]));
auto& y = output[batch * output_strides[0] + row * output_strides[1] +
i * output_strides[2]];
const auto& output_axis_1 =
has_row_vectors ? ind : static_cast<El::Int>(row);
const auto& output_axis_2 =
has_row_vectors ? static_cast<El::Int>(i) : ind;
const auto& values_offset =
output_axis_1 * values_strides[1] + output_axis_2 * values_strides[2];
if (0 <= ind && ind < static_cast<El::Int>(bounds)) {
y = values[batch * values_strides[0] + values_offset];
}
else {
y = T{0.f};
}
}
}
}
}
} // namespace
// =============================================================
// Gather member functions
// =============================================================
template <typename TensorDataType, data_layout Layout, El::Device Device>
void gather_layer<TensorDataType, Layout, Device>::fp_compute()
{
#if defined LBANN_HAS_DISTCONV && defined LBANN_HAS_NVSHMEM
// Initialize the nvshmem here. No Op if already initialized
nvshmem::initialize();
if (this->distconv_enabled()) {
this->get_distconv_adapter().fp_compute();
return;
}
#endif // LBANN_HAS_DISTCONV && LBANN_HAS_NVSHMEM
// Local matrices
const auto& local_values = this->get_local_prev_activations(0);
const auto& local_indices = this->get_local_prev_activations(1);
auto& local_output = this->get_local_activations();
const size_t local_mini_batch_size = local_indices.Width();
const auto& input_dims_ = this->get_input_dims();
const auto& output_dims_ = this->get_output_dims();
std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());
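// The input is either a flat vector (1D) or a num_rows x values_size matrix;
// gathering along axis 0 selects whole rows, gathering along axis 1 selects columns.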
const bool is_2D = input_dims.size() > 1;
const bool has_row_vectors = (is_2D && m_gather_axis == 0);
const size_t values_size = is_2D ? input_dims[1] : this->get_input_size(0);
const size_t output_size =
is_2D ? this->get_output_dims()[1] : this->get_output_size();
const size_t num_rows = (input_dims.size() > 1) ? input_dims[0] : 1;
const size_t num_output_rows =
has_row_vectors ? this->get_output_dims()[0] : num_rows;
const size_t value_stride_2 = is_2D ? values_size : 0;
const size_t output_stride_2 = is_2D ? output_size : 0;
// Gather into output tensor
if (!local_output.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_values),
gpu::get_sync_info(local_indices));
constexpr size_t block_size_x = 32;
constexpr size_t block_size_y = 8;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
block_dims.z = 1;
grid_dims.x = (values_size + block_dims.x - 1) / block_dims.x;
grid_dims.y = (num_rows + block_dims.y - 1) / block_dims.y;
grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
gpu_lib::clip_grid_dims(grid_dims);
if (has_row_vectors) {
hydrogen::gpu::LaunchKernel(
gather3d_kernel<TensorDataType, true>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_values.LockedBuffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
local_output.Buffer(),
Dim3{local_mini_batch_size, num_output_rows, values_size},
Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
}
else {
hydrogen::gpu::LaunchKernel(
gather3d_kernel<TensorDataType, false>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_values.LockedBuffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
local_output.Buffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
}
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void gather_layer<TensorDataType, Layout, Device>::bp_compute()
{
#if defined LBANN_HAS_DISTCONV && defined LBANN_HAS_NVSHMEM
// Initialize the nvshmem here. No Op if already initialized
nvshmem::initialize();
if (this->distconv_enabled()) {
this->get_distconv_adapter().bp_compute();
return;
}
#endif // LBANN_HAS_DISTCONV && LBANN_HAS_NVSHMEM
// Local matrices
const auto& local_indices = this->get_local_prev_activations(1);
const auto& local_output_grad = this->get_local_prev_error_signals();
auto& local_values_grad = this->get_local_error_signals(0);
auto& local_indices_grad = this->get_local_error_signals(1);
const auto& input_dims_ = this->get_input_dims();
const auto& output_dims_ = this->get_output_dims();
std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());
const size_t local_mini_batch_size = local_indices.Width();
const bool is_2D = input_dims.size() > 1;
const bool has_row_vectors = (is_2D && m_gather_axis == 0);
const size_t values_size = is_2D ? input_dims[1] : this->get_input_size(0);
const size_t output_size =
is_2D ? this->get_output_dims()[1] : this->get_output_size();
const size_t num_rows = (input_dims.size() > 1) ? input_dims[0] : 1;
const size_t num_output_rows =
has_row_vectors ? this->get_output_dims()[0] : num_rows;
const size_t value_stride_2 = (input_dims.size() > 1) ? values_size : 0;
const size_t output_stride_2 = (input_dims.size() > 1) ? output_size : 0;
// Zero out gradient w.r.t. indices
El::Zero(local_indices_grad);
// Scatter into output matrix
El::Zero(local_values_grad);
if (!local_output_grad.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_values_grad),
gpu::get_sync_info(local_output_grad),
gpu::get_sync_info(local_indices));
constexpr size_t block_size_x = 32;
constexpr size_t block_size_y = 8;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
block_dims.z = 1;
grid_dims.x = (output_size + block_dims.x - 1) / block_dims.x;
grid_dims.y = (num_rows + block_dims.y - 1) / block_dims.y;
grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
gpu_lib::clip_grid_dims(grid_dims);
if (has_row_vectors) {
hydrogen::gpu::LaunchKernel(
scatter3d_kernel<TensorDataType, true>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_output_grad.LockedBuffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
local_values_grad.Buffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
}
else {
hydrogen::gpu::LaunchKernel(
scatter3d_kernel<TensorDataType, false>,
grid_dims,
block_dims,
0,
multisync,
local_indices.LockedBuffer(),
Dim2{static_cast<size_t>(local_indices.LDim()), 1},
local_output_grad.LockedBuffer(),
Dim3{local_mini_batch_size, num_output_rows, output_size},
Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
local_values_grad.Buffer(),
Dim3{local_mini_batch_size, num_rows, values_size},
Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
}
}
}
#define PROTO(T) \
template class gather_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
1d34d659401d97c73ce93144873a438c6ab21beb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <hip/hip_cooperative_groups.h>
#include <stdio.h>
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template <>
struct SharedMemory<double> {
__device__ inline operator double *() {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
template <class T>
__global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n) mySum += g_idata[i + blockDim.x];
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = mySum = mySum + sdata[tid + s];
}
cg::sync(cta);
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
template <class T>
__global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) {
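// Warp-shuffle partial reduction: each warp loads warpSize consecutive elements
// (one per lane), reduces them with __shfl_down_sync, and lane 0 writes one
// partial sum per warp to g_odata; the per-warp partials still need a final pass.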
int pos_start = blockIdx.x * blockDim.x + threadIdx.x;
int pos_step = blockDim.x * gridDim.x;
int warp_idx = pos_start / warpSize;
int warp_step = pos_step / warpSize;
int pop_cnt = __popc(pos_start);
for(int i=pos_start; i<n;i+=pos_step){
T sum = g_idata[i];
__syncwarp();
for(int delta=warpSize/2; delta>0;delta/=2){
sum += __shfl_down_sync((unsigned int)-1,sum,delta);
}
if((pos_start%warpSize) == 0) g_odata[warp_idx] = sum;
warp_idx += warp_step;
}
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
extern "C" bool isPow2(unsigned int x);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata,
T *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( reduce4<T>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
// reduction using shuffle (reduce6 variants below are currently disabled)
/* if (isPow2(size)) {
switch (threads) {
case 512:
reduce6<T, 512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce6<T, 512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
}*/
}
// Instantiate the reduction function for 3 types
template void reduce<int>(int size, int threads, int blocks, int whichKernel,
int *d_idata, int *d_odata);
template void reduce<float>(int size, int threads, int blocks, int whichKernel,
float *d_idata, float *d_odata);
template void reduce<double>(int size, int threads, int blocks, int whichKernel,
double *d_idata, double *d_odata);
#endif // #ifndef _REDUCE_KERNEL_H_
| 1d34d659401d97c73ce93144873a438c6ab21beb.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <cooperative_groups.h>
#include <stdio.h>
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template <class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template <>
struct SharedMemory<double> {
__device__ inline operator double *() {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const {
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
template <class T>
__global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n) mySum += g_idata[i + blockDim.x];
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = mySum = mySum + sdata[tid + s];
}
cg::sync(cta);
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
template <class T>
__global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) {
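// Warp-shuffle partial reduction: each warp loads warpSize consecutive elements
// (one per lane), reduces them with __shfl_down_sync, and lane 0 writes one
// partial sum per warp to g_odata; the per-warp partials still need a final pass.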
int pos_start = blockIdx.x * blockDim.x + threadIdx.x;
int pos_step = blockDim.x * gridDim.x;
int warp_idx = pos_start / warpSize;
int warp_step = pos_step / warpSize;
int pop_cnt = __popc(pos_start);
for(int i=pos_start; i<n;i+=pos_step){
T sum = g_idata[i];
__syncwarp();
for(int delta=warpSize/2; delta>0;delta/=2){
sum += __shfl_down_sync((unsigned int)-1,sum,delta);
}
if((pos_start%warpSize) == 0) g_odata[warp_idx] = sum;
warp_idx += warp_step;
}
}
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2
// sized arrays
if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32) {
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size() / 2; offset > 0; offset /= 2) {
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
extern "C" bool isPow2(unsigned int x);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata,
T *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
reduce4<T><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
// reduction using shuffle (reduce6 variants below are currently disabled)
/* if (isPow2(size)) {
switch (threads) {
case 512:
reduce6<T, 512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce6<T, 512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
}*/
}
// Instantiate the reduction function for 3 types
template void reduce<int>(int size, int threads, int blocks, int whichKernel,
int *d_idata, int *d_odata);
template void reduce<float>(int size, int threads, int blocks, int whichKernel,
float *d_idata, float *d_odata);
template void reduce<double>(int size, int threads, int blocks, int whichKernel,
double *d_idata, double *d_odata);
#endif // #ifndef _REDUCE_KERNEL_H_
|
e216ab02da617d810a325bb13bd2ad94414bfe7c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
typedef unsigned int uint;
#define WARP_SZ 32
#define BATCH_BLOCKS 1
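// BATCH_BLOCKS is the number of logical block indices a thread block claims per
// atomicAdd on taskIdx in the work-pulling loop of MatrixMulCUDA below; WARP_SZ
// only feeds lane_id(), which is not referenced elsewhere in this file.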
__device__ int taskIdx; // "slateIdx"
__device__ inline int lane_id(void) { return threadIdx.x % WARP_SZ; }
__device__ __inline__ uint get_smid(void){
uint ret;
  asm("mov.u32 %0, %%smid;" : "=r"(ret) );
return ret;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(int sm_low, int sm_high,
int grid_size, int *block_index,
int *max_blocks,
float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
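  // Worked example (editorial, assuming the default sizes from main(): dimsA =
  // 320x320, dimsB = 640x320, BLOCK_SIZE = 32): for the block at (bx, by) = (2, 1),
  // aBegin = 320*32*1 = 10240, aEnd = 10559, aStep = 32, bBegin = 32*2 = 64 and
  // bStep = 32*640 = 20480.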
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Overriding the built-in CUDA variables (for executing blocks)
__shared__ int smid;
__shared__ int valid_task;
__shared__ int globIdx;
__shared__ int logicalBlockIdx;
__shared__ int physicalBlockIdx;
__shared__ uint3 shared_blockID;
//__shared__ uint3 shared_threadID;
  // There are 30 SMs in the TITAN Xp (each SM has 128 CUDA cores = 4 warps)
//int sm_low = 0;
//int sm_high = 14;
const int leader = (threadIdx.x == 0 &&
threadIdx.y == 0 &&
threadIdx.z == 0);
if (leader) {
// logical block id init
logicalBlockIdx = 0;
// get SM id
smid = get_smid();
    // the task is valid only if this block's SM id lies within [sm_low, sm_high]
valid_task = !(smid < sm_low || smid > sm_high);
//printf("Thread %d, smid: %u, valid_task: %d\n", th_idx, smid, valid_task);
}
__syncthreads();
if (!valid_task) {return;}
if (leader){
physicalBlockIdx = atomicAdd(&(block_index[smid]), 1);
}
__syncthreads();
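  // Editorial note: the loop below is a persistent-block style scheme. Blocks that
  // landed on an SM inside [sm_low, sm_high] stay resident and repeatedly claim
  // logical block indices from the global counter taskIdx via atomicAdd,
  // BATCH_BLOCKS at a time, rather than relying solely on the hardware blockIdx.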
int range = sm_high - sm_low + 1;
int cnt = 0;
/* Kernel Code */
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
//for (int a = aBegin, b = bBegin;
// a <= aEnd;
// a += aStep, b += bStep) {
while(1){
//while(physicalBlockIdx >= *max_blocks){
// physicalBlockIdx = block_index[smid];
//}
if (leader){
// pull tasks atomically
globIdx = atomicAdd(&taskIdx, BATCH_BLOCKS); // globIdx is old taskIdx
// (previously executed task index)
//
// the_next_globIdx = globIdx + range * BATCH_BLOCKS;
      // the_next_globIdx would indicate the queue status, i.e. whether the next logical block is still waiting in the queue
shared_blockID.x = globIdx % gridDim.x - 1;
shared_blockID.y = globIdx / gridDim.x;
}
// Calculating current thread idx (a kind of current work index)
// tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y
// wA = dimsA.x, wB = dimsB.x,
// aBegin = wA * BLOCK_SIZE * by; aStep = BLOCK_SIZE;
// bBegin = BLOCK_SIZE * bx; bStep = BLOCK_SIZE * wB;
uint3 blockID = {shared_blockID.x, shared_blockID.y, 1};
int block_idx = blockID.y * gridDim.x + blockID.x;
int thread_idx = block_idx * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
//As[ty][tx] = A[a + wA * ty + tx];
//Bs[ty][tx] = B[b + wB * ty + tx];
int a_d_index = aBegin + wA * ty + tx + aStep * cnt;
int b_d_index = bBegin + wB * ty + tx + bStep * cnt;
As[ty][tx] = A[a_d_index];
Bs[ty][tx] = B[b_d_index];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
//}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
cnt = cnt + 1;
// End of Kernel Code //
if (leader){
atomicSub(&(block_index[smid]), 1);
}
if(globIdx >= grid_size)
break;
}
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
printf("dimsA x: %d,y: %d\n", dimsA.x, dimsA.y);
printf("dimsB x: %d,y: %d\n", dimsB.x, dimsB.y);
printf("dimsC x: %d,y: %d\n", dimsC.x, dimsC.y);
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size); // block_size
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // grid_size (in terms of block unit)
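  // Worked example (editorial, default sizes with block_size = 32): threads =
  // (32, 32) and grid = (640/32, 320/32) = (20, 10), so grid_size below is
  // 200 logical blocks for the kernel's work-pulling loop.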
// Added by ysnam
const int num_sm = 30;
  // Set up the kernel launch bookkeeping; originally adapted from yjha's vectorAdd code.
int grid_size = grid.x * grid.y * grid.z; // Total Number of Thread Blocks
int* block_index = 0; // Current Thread Blocks of each SM
int* max_blocks = 0; // The upper bound for Thread Blocks for SM
int host_max_blocks = 5;
  // initialize variables
hipMalloc((void**)&block_index, sizeof(int)*(num_sm));
hipMemset(block_index, 0, sizeof(int) * (num_sm));
hipMalloc((void**)&max_blocks, sizeof(int));
hipMemset(max_blocks, 0, sizeof(int));
hipMemcpy(max_blocks, &host_max_blocks, sizeof(int), hipMemcpyHostToDevice);
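  // block_index[smid] counts the blocks of this kernel currently resident on each
  // SM, and host_max_blocks (= 5) is copied into max_blocks as the intended
  // per-SM cap; note that in the kernel the cap check against *max_blocks is
  // commented out, so the limit is not actually enforced.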
// set upper and lower bound for SMs to execute kernels (SM allocation)
int currentIdx = 0;
int sm_low = 0;
int sm_high = 29;
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
checkCudaErrors(hipEventCreate(&start));
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
int nIter = 300;
printf("done2\n");
do {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
}
printf("hello loop!\n");
printf("[before] currentIdx: %d, taskIdx: %d, grid_size: %d\n",currentIdx, taskIdx, grid_size);
    hipMemcpyFromSymbol(&currentIdx, taskIdx, sizeof(taskIdx), 0, hipMemcpyDeviceToHost);
printf("[after] currentIdx: %d, taskIdx: %d, grid_size: %d\n",currentIdx, taskIdx, grid_size);
}while(currentIdx < grid_size);
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
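  // Worked example (editorial, default sizes): every element of C is expected to
  // be dimsA.x * valB = 320 * 0.01 = 3.2, so abs_err is measured against 3.2 and
  // then divided by |h_C[i]| and dot_length = 320 before the comparison with eps.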
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
//printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
// i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipFree(block_index));
checkCudaErrors(hipFree(max_blocks));
printf("\nNOTE: The CUDA Samples are not meant for performance "\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
// before - for backup
// dimsA : 5 * 2 * block_size, 5 * 2 * block_size, 1
// dimsB : 5 * 4 * block_size, 5 * 2 * block_size, 1
// 50 -> ok, 250 -> ok, 300 -> ok
  // 400 or 500 -> SIGSEGV
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| e216ab02da617d810a325bb13bd2ad94414bfe7c.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
typedef unsigned int uint;
#define WARP_SZ 32
#define BATCH_BLOCKS 1
__device__ int taskIdx; // "slateIdx"
__device__ inline int lane_id(void) { return threadIdx.x % WARP_SZ; }
__device__ __inline__ uint get_smid(void){
uint ret;
  asm("mov.u32 %0, %%smid;" : "=r"(ret) );
return ret;
}
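// Editorial note: %smid is a read-only PTX special register holding the id of the
// SM executing the thread; MatrixMulCUDA uses it to keep only the blocks that
// landed on SMs in [sm_low, sm_high] alive.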
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(int sm_low, int sm_high,
int grid_size, int *block_index,
int *max_blocks,
float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Overriding the built-in CUDA variables (for executing blocks)
__shared__ int smid;
__shared__ int valid_task;
__shared__ int globIdx;
__shared__ int logicalBlockIdx;
__shared__ int physicalBlockIdx;
__shared__ uint3 shared_blockID;
//__shared__ uint3 shared_threadID;
  // There are 30 SMs in the TITAN Xp (each SM has 128 CUDA cores = 4 warps)
//int sm_low = 0;
//int sm_high = 14;
const int leader = (threadIdx.x == 0 &&
threadIdx.y == 0 &&
threadIdx.z == 0);
if (leader) {
// logical block id init
logicalBlockIdx = 0;
// get SM id
smid = get_smid();
    // the task is valid only if this block's SM id lies within [sm_low, sm_high]
valid_task = !(smid < sm_low || smid > sm_high);
//printf("Thread %d, smid: %u, valid_task: %d\n", th_idx, smid, valid_task);
}
__syncthreads();
if (!valid_task) {return;}
if (leader){
physicalBlockIdx = atomicAdd(&(block_index[smid]), 1);
}
__syncthreads();
int range = sm_high - sm_low + 1;
int cnt = 0;
/* Kernel Code */
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
//for (int a = aBegin, b = bBegin;
// a <= aEnd;
// a += aStep, b += bStep) {
while(1){
//while(physicalBlockIdx >= *max_blocks){
// physicalBlockIdx = block_index[smid];
//}
if (leader){
// pull tasks atomically
globIdx = atomicAdd(&taskIdx, BATCH_BLOCKS); // globIdx is old taskIdx
// (previously executed task index)
//
// the_next_globIdx = globIdx + range * BATCH_BLOCKS;
      // the_next_globIdx would indicate the queue status, i.e. whether the next logical block is still waiting in the queue
shared_blockID.x = globIdx % gridDim.x - 1;
shared_blockID.y = globIdx / gridDim.x;
}
// Calculating current thread idx (a kind of current work index)
// tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y
// wA = dimsA.x, wB = dimsB.x,
// aBegin = wA * BLOCK_SIZE * by; aStep = BLOCK_SIZE;
// bBegin = BLOCK_SIZE * bx; bStep = BLOCK_SIZE * wB;
uint3 blockID = {shared_blockID.x, shared_blockID.y, 1};
int block_idx = blockID.y * gridDim.x + blockID.x;
int thread_idx = block_idx * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
//As[ty][tx] = A[a + wA * ty + tx];
//Bs[ty][tx] = B[b + wB * ty + tx];
int a_d_index = aBegin + wA * ty + tx + aStep * cnt;
int b_d_index = bBegin + wB * ty + tx + bStep * cnt;
As[ty][tx] = A[a_d_index];
Bs[ty][tx] = B[b_d_index];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
//}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
cnt = cnt + 1;
// End of Kernel Code //
if (leader){
atomicSub(&(block_index[smid]), 1);
}
if(globIdx >= grid_size)
break;
}
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
printf("dimsA x: %d,y: %d\n", dimsA.x, dimsA.y);
printf("dimsB x: %d,y: %d\n", dimsB.x, dimsB.y);
printf("dimsC x: %d,y: %d\n", dimsC.x, dimsC.y);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size); // block_size
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // grid_size (in terms of block unit)
// Added by ysnam
const int num_sm = 30;
  // Set up the kernel launch bookkeeping; originally adapted from yjha's vectorAdd code.
int grid_size = grid.x * grid.y * grid.z; // Total Number of Thread Blocks
int* block_index = 0; // Current Thread Blocks of each SM
int* max_blocks = 0; // The upper bound for Thread Blocks for SM
int host_max_blocks = 5;
  // initialize variables
cudaMalloc((void**)&block_index, sizeof(int)*(num_sm));
cudaMemset(block_index, 0, sizeof(int) * (num_sm));
cudaMalloc((void**)&max_blocks, sizeof(int));
cudaMemset(max_blocks, 0, sizeof(int));
cudaMemcpy(max_blocks, &host_max_blocks, sizeof(int), cudaMemcpyHostToDevice);
// set upper and lower bound for SMs to execute kernels (SM allocation)
int currentIdx = 0;
int sm_low = 0;
int sm_high = 29;
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
checkCudaErrors(cudaEventCreate(&start));
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Execute the kernel
int nIter = 300;
printf("done2\n");
do {
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/* MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
}
printf("hello loop!\n");
printf("[before] currentIdx: %d, taskIdx: %d, grid_size: %d\n",currentIdx, taskIdx, grid_size);
    cudaMemcpyFromSymbol(&currentIdx, taskIdx, sizeof(taskIdx), 0, cudaMemcpyDeviceToHost);
printf("[after] currentIdx: %d, taskIdx: %d, grid_size: %d\n",currentIdx, taskIdx, grid_size);
}while(currentIdx < grid_size);
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/* MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(sm_low, sm_high,
grid_size, block_index,
max_blocks,
d_C, d_A, d_B,
dimsA.x, dimsB.x);
/*
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
*/
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
//printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
// i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaFree(block_index));
checkCudaErrors(cudaFree(max_blocks));
printf("\nNOTE: The CUDA Samples are not meant for performance "\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
// before - for backup
// dimsA : 5 * 2 * block_size, 5 * 2 * block_size, 1
// dimsB : 5 * 4 * block_size, 5 * 2 * block_size, 1
// 50 -> ok, 250 -> ok, 300 -> ok
  // 400 or 500 -> SIGSEGV
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.