hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
fdbd1a8a056c7a9fb806110d58d90f2985b2a792.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "finalProject_kernel.cuh"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
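// Helper that exposes the kernel's dynamically allocated (extern __shared__) buffer as a char pointer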
struct SharedMemory
{
__device__ inline operator char *()
{
extern __shared__ char __smem[];
return (char *)__smem;
}
__device__ inline operator const char *() const
{
extern __shared__ char __smem[];
return (char *)__smem;
}
};
// kernel 5 + some more bitwise operations...
__global__ void kernel6(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
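// Grid-stride loop: each thread handles cellId, cellId + blockDim.x * gridDim.x, ... (__mul24 is the 24-bit integer multiply intrinsic)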
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
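// NOTE: the bitwise-AND index math below assumes worldWidth and worldSize are powers of two (see kernel3 for the '%' equivalents)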
int x = cellId & (worldWidth - 1); // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) & (worldWidth - 1); // xleft=3
int xRight = (x + 1) & (worldWidth - 1); // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) & (worldSize - 1); // yabsup=12
int yAbsDown = (yAbs + worldWidth) & (worldSize - 1); // yabsdown=4
int mult = (threadIdx.x << 1) + threadIdx.x;
// load left neighbors to SM
sdata[mult + 0] = lifeData[xLeft + yAbsUp];
sdata[mult + 1] = lifeData[xLeft + yAbs];
sdata[mult + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[mult + 3] = lifeData[x + yAbsUp];
sdata[mult + 4] = lifeData[x + yAbs];
sdata[mult + 5] = lifeData[x + yAbsDown];
sdata[mult + 6] = lifeData[xRight + yAbsUp];
sdata[mult + 7] = lifeData[xRight + yAbs];
sdata[mult + 8] = lifeData[xRight + yAbsDown];
}
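// NOTE: this barrier assumes every thread in the block performs the same number of grid-stride iterations, i.e. worldSize is a multiple of blockDim.x * gridDim.x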
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + mult;
char currCellColor = sdata[currCellLocInSData];
//char nextColor = (currCellColor + 1) % colors;
char nextColor = (currCellColor + 1) & (colors - 1);
if (((sdata[currCellLocInSData - 4] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 3] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 2] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 1] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 1] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 2] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 3] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 4] ^ nextColor) == 0))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
// no % operator
__global__ void kernel5(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
//int x = cellId % worldWidth; // x=0
int x = cellId & (worldWidth - 1); // x=0
int yAbs = cellId - x; // yabs = 0
//int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xLeft = (x + worldWidth - 1) & (worldWidth - 1); // xleft=3
//int xRight = (x + 1) % worldWidth; // xright=1
int xRight = (x + 1) & (worldWidth - 1); // xright=1
//int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsUp = (yAbs + worldSize - worldWidth) & (worldSize - 1); // yabsup=12
//int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
int yAbsDown = (yAbs + worldWidth) & (worldSize - 1); // yabsdown=4
// load left neighbors to SM
sdata[threadIdx.x * 3 + 0] = lifeData[xLeft + yAbsUp];
sdata[threadIdx.x * 3 + 1] = lifeData[xLeft + yAbs];
sdata[threadIdx.x * 3 + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[threadIdx.x * 3 + 3] = lifeData[x + yAbsUp];
sdata[threadIdx.x * 3 + 4] = lifeData[x + yAbs];
sdata[threadIdx.x * 3 + 5] = lifeData[x + yAbsDown];
sdata[threadIdx.x * 3 + 6] = lifeData[xRight + yAbsUp];
sdata[threadIdx.x * 3 + 7] = lifeData[xRight + yAbs];
sdata[threadIdx.x * 3 + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + threadIdx.x * 3;
char currCellColor = sdata[currCellLocInSData];
//char nextColor = (currCellColor + 1) % colors;
char nextColor = (currCellColor + 1) & (colors - 1);
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
// shared memory + fewer registers
__global__ void kernel4(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
__shared__ char sdata[1024 * 3 + 6];
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < __mul24(worldWidth , worldHeight);
cellId += blockDim.x * gridDim.x)
{
#define WORLD_SIZE (worldWidth * worldHeight)
#define X (cellId % worldWidth)
#define yAbs (cellId - X)
#define xLeft ((X + worldWidth - 1) % worldWidth)
#define xRight ((X + 1) % worldWidth)
#define yAbsUp ((yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE)
#define yAbsDown ( (yAbs + worldWidth) % WORLD_SIZE)
#define currCellColor (lifeData[X + yAbs])
#define nextColor ((currCellColor + 1) % 16)
// load left neighbors to SM
sdata[(threadIdx.x << 1) + threadIdx.x + 0] = lifeData[xLeft + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 1] = lifeData[xLeft + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[(threadIdx.x << 1) + threadIdx.x + 3] = lifeData[X + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 4] = lifeData[X + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 5] = lifeData[X + yAbsDown];
sdata[(threadIdx.x << 1) + threadIdx.x + 6] = lifeData[xRight + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 7] = lifeData[xRight + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
#define currCellLocInSData (4 + threadIdx.x * 3)
//char currCellColor = sdata[currCellLocInSData];
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[X + yAbs] = nextColor;
}
else
{
resultLifeData[X + yAbs] = sdata[(4 + threadIdx.x * 3)];
}
}
#undef X
#undef yAbs
#undef xLeft
#undef xRight
#undef yAbsUp
#undef yAbsDown
#undef currCellColor
#undef nextColor
#undef currCellLocInSData
#undef WORLD_SIZE
}
// shared memory - danny!
__global__ void kernel3(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
// load left neighbors to SM
sdata[threadIdx.x * 3 + 0] = lifeData[xLeft + yAbsUp];
sdata[threadIdx.x * 3 + 1] = lifeData[xLeft + yAbs];
sdata[threadIdx.x * 3 + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[threadIdx.x * 3 + 3] = lifeData[x + yAbsUp];
sdata[threadIdx.x * 3 + 4] = lifeData[x + yAbs];
sdata[threadIdx.x * 3 + 5] = lifeData[x + yAbsDown];
sdata[threadIdx.x * 3 + 6] = lifeData[xRight + yAbsUp];
sdata[threadIdx.x * 3 + 7] = lifeData[xRight + yAbs];
sdata[threadIdx.x * 3 + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + threadIdx.x * 3;
char currCellColor = sdata[currCellLocInSData];
char nextColor = (currCellColor + 1) % colors;
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
/// pointer arithmetic
__global__ void kernel2(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x) {
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
char currCellColor = lifeData[x + yAbs];
char nextColor = (currCellColor + 1) % colors;
if ((*(lifeData + xLeft + yAbsUp) == nextColor) || (*(lifeData + x + yAbsUp) == nextColor) || (*(lifeData + xRight + yAbsUp) == nextColor) || (*(lifeData + xLeft + yAbsDown) == nextColor) ||
(*(lifeData + x + yAbsDown) == nextColor) || (*(lifeData + xRight + yAbsDown) == nextColor) || (*(lifeData + xLeft + yAbs) == nextColor) || (*(lifeData + xRight + yAbs) == nextColor))
{
*(resultLifeData + x + yAbs) = nextColor;
}
else
{
*(resultLifeData + x + yAbs) = currCellColor;
}
}
}
// remove all possible variables (we actually don't need most of them)
__global__ void kernel1(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
#define WORLD_SIZE (worldWidth * worldHeight)
//int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
//cellId < worldSize;
cellId < WORLD_SIZE;
cellId += blockDim.x * gridDim.x) {
#define x (cellId % worldWidth)
// int x = cellId % worldWidth; // x=0
#define yAbs (cellId - x)
// int yAbs = cellId - x; // yabs = 0
#define xLeft ((x + worldWidth - 1) % worldWidth)
// int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
#define xRight ((x + 1) % worldWidth)
// int xRight = (x + 1) % worldWidth; // xright=1
//int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
#define yAbsUp ((yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE)
// int yAbsUp = (yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE;
//int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
#define yAbsDown ( (yAbs + worldWidth) % WORLD_SIZE)
// int yAbsDown = (yAbs + worldWidth) % WORLD_SIZE; // yabsdown=4
//char currCellColor = lifeData[x + yAbs];
#define currCellColor (lifeData[x + yAbs])
// char nextColor = (currCellColor + 1) % colors;
#define nextColor ((currCellColor + 1) % colors)
if ((lifeData[xLeft + yAbsUp] == nextColor) || (lifeData[x + yAbsUp] == nextColor) || (lifeData[xRight + yAbsUp] == nextColor) || (lifeData[xLeft + yAbsDown] == nextColor) ||
(lifeData[x + yAbsDown] == nextColor) || (lifeData[xRight + yAbsDown] == nextColor) || (lifeData[xLeft + yAbs] == nextColor) || (lifeData[xRight + yAbs] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
#undef x
#undef yAbs
#undef xLeft
#undef xRight
#undef yAbsUp
#undef yAbsDown
#undef currCellColor
#undef nextColor
}
/// naive approach
__global__ void kernel0(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x) {
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
char currCellColor = lifeData[x + yAbs];
char nextColor = (currCellColor + 1) % colors;
if ((lifeData[xLeft + yAbsUp] == nextColor) || (lifeData[x + yAbsUp] == nextColor) || (lifeData[xRight + yAbsUp] == nextColor) || (lifeData[xLeft + yAbsDown] == nextColor) ||
(lifeData[x + yAbsDown] == nextColor) || (lifeData[xRight + yAbsDown] == nextColor) || (lifeData[xLeft + yAbs] == nextColor) || (lifeData[xRight + yAbs] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
void reduce(int boardHeight, int boardWidth, int numThreads, int numBlocks, char** d_idata, char** d_odata, int epochs, int kernelId)
{
char* temp;
switch (kernelId)
{
case 0:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel0 << <numBlocks, numThreads>> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 1:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel1 << <numBlocks, numThreads >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 2:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel2 << <numBlocks, numThreads>> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 3:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel3 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 4:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel4 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 5:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel5 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 6:
for (size_t i = 0; i < epochs; i++) {
hipDeviceSynchronize();
kernel6 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
default:
break;
}
checkCudaErrors(hipDeviceSynchronize());
}
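// A minimal, hypothetical host-side usage sketch (not part of the original project; the function name,
// board size, and launch configuration below are illustrative assumptions). It allocates the two device
// buffers that reduce() ping-pongs between, zero-initializes the board, and runs a few epochs of the
// naive kernel (kernelId = 0). A real caller would upload an initial board instead of the memset.
void runLifeSketch()
{
	const int boardWidth = 1024;   // assumed power of two (required by the bitwise indexing in kernels 5 and 6)
	const int boardHeight = 1024;
	const int numThreads = 1024;   // kernels 3-6 assume at most 1024 threads per block (shared buffer of 1024 * 3 + 6 bytes)
	const int numBlocks = 64;
	const int epochs = 10;
	size_t worldBytes = (size_t)boardWidth * boardHeight * sizeof(char);
	char *d_in = NULL;
	char *d_out = NULL;
	gpuErrchk(hipMalloc((void **)&d_in, worldBytes));
	gpuErrchk(hipMalloc((void **)&d_out, worldBytes));
	gpuErrchk(hipMemset(d_in, 0, worldBytes));
	reduce(boardHeight, boardWidth, numThreads, numBlocks, &d_in, &d_out, epochs, 0);
	gpuErrchk(hipFree(d_in));
	gpuErrchk(hipFree(d_out));
}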
| fdbd1a8a056c7a9fb806110d58d90f2985b2a792.cu | #include "finalProject_kernel.cuh"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
struct SharedMemory
{
__device__ inline operator char *()
{
extern __shared__ char __smem[];
return (char *)__smem;
}
__device__ inline operator const char *() const
{
extern __shared__ char __smem[];
return (char *)__smem;
}
};
// kernel 5 + some more bitwise operations...
__global__ void kernel6(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
int x = cellId & (worldWidth - 1); // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) & (worldWidth - 1); // xleft=3
int xRight = (x + 1) & (worldWidth - 1); // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) & (worldSize - 1); // yabsup=12
int yAbsDown = (yAbs + worldWidth) & (worldSize - 1); // yabsdown=4
int mult = (threadIdx.x << 1) + threadIdx.x;
// load left neighbors to SM
sdata[mult + 0] = lifeData[xLeft + yAbsUp];
sdata[mult + 1] = lifeData[xLeft + yAbs];
sdata[mult + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[mult + 3] = lifeData[x + yAbsUp];
sdata[mult + 4] = lifeData[x + yAbs];
sdata[mult + 5] = lifeData[x + yAbsDown];
sdata[mult + 6] = lifeData[xRight + yAbsUp];
sdata[mult + 7] = lifeData[xRight + yAbs];
sdata[mult + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + mult;
char currCellColor = sdata[currCellLocInSData];
//char nextColor = (currCellColor + 1) % colors;
char nextColor = (currCellColor + 1) & (colors - 1);
if (((sdata[currCellLocInSData - 4] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 3] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 2] ^ nextColor) == 0) ||
((sdata[currCellLocInSData - 1] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 1] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 2] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 3] ^ nextColor) == 0) ||
((sdata[currCellLocInSData + 4] ^ nextColor) == 0))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
// no % operator
__global__ void kernel5(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
//int x = cellId % worldWidth; // x=0
int x = cellId & (worldWidth - 1); // x=0
int yAbs = cellId - x; // yabs = 0
//int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xLeft = (x + worldWidth - 1) & (worldWidth - 1); // xleft=3
//int xRight = (x + 1) % worldWidth; // xright=1
int xRight = (x + 1) & (worldWidth - 1); // xright=1
//int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsUp = (yAbs + worldSize - worldWidth) & (worldSize - 1); // yabsup=12
//int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
int yAbsDown = (yAbs + worldWidth) & (worldSize - 1); // yabsdown=4
// load left neighbors to SM
sdata[threadIdx.x * 3 + 0] = lifeData[xLeft + yAbsUp];
sdata[threadIdx.x * 3 + 1] = lifeData[xLeft + yAbs];
sdata[threadIdx.x * 3 + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[threadIdx.x * 3 + 3] = lifeData[x + yAbsUp];
sdata[threadIdx.x * 3 + 4] = lifeData[x + yAbs];
sdata[threadIdx.x * 3 + 5] = lifeData[x + yAbsDown];
sdata[threadIdx.x * 3 + 6] = lifeData[xRight + yAbsUp];
sdata[threadIdx.x * 3 + 7] = lifeData[xRight + yAbs];
sdata[threadIdx.x * 3 + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + threadIdx.x * 3;
char currCellColor = sdata[currCellLocInSData];
//char nextColor = (currCellColor + 1) % colors;
char nextColor = (currCellColor + 1) & (colors - 1);
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
// shared memory + fewer registers
__global__ void kernel4(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
__shared__ char sdata[1024 * 3 + 6];
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < __mul24(worldWidth , worldHeight);
cellId += blockDim.x * gridDim.x)
{
#define WORLD_SIZE (worldWidth * worldHeight)
#define X (cellId % worldWidth)
#define yAbs (cellId - X)
#define xLeft ((X + worldWidth - 1) % worldWidth)
#define xRight ((X + 1) % worldWidth)
#define yAbsUp ((yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE)
#define yAbsDown ( (yAbs + worldWidth) % WORLD_SIZE)
#define currCellColor (lifeData[X + yAbs])
#define nextColor ((currCellColor + 1) % 16)
// load left neighbors to SM
sdata[(threadIdx.x << 1) + threadIdx.x + 0] = lifeData[xLeft + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 1] = lifeData[xLeft + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[(threadIdx.x << 1) + threadIdx.x + 3] = lifeData[X + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 4] = lifeData[X + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 5] = lifeData[X + yAbsDown];
sdata[(threadIdx.x << 1) + threadIdx.x + 6] = lifeData[xRight + yAbsUp];
sdata[(threadIdx.x << 1) + threadIdx.x + 7] = lifeData[xRight + yAbs];
sdata[(threadIdx.x << 1) + threadIdx.x + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
#define currCellLocInSData (4 + threadIdx.x * 3)
//char currCellColor = sdata[currCellLocInSData];
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[X + yAbs] = nextColor;
}
else
{
resultLifeData[X + yAbs] = sdata[(4 + threadIdx.x * 3)];
}
}
#undef X
#undef yAbs
#undef xLeft
#undef xRight
#undef yAbsUp
#undef yAbsDown
#undef currCellColor
#undef nextColor
#undef currCellLocInSData
#undef WORLD_SIZE
}
// shared memory - danny!
__global__ void kernel3(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
char *sdata = SharedMemory();
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x)
{
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
// load left neighbors to SM
sdata[threadIdx.x * 3 + 0] = lifeData[xLeft + yAbsUp];
sdata[threadIdx.x * 3 + 1] = lifeData[xLeft + yAbs];
sdata[threadIdx.x * 3 + 2] = lifeData[xLeft + yAbsDown];
// if last thread - load 3 from current col and 3 from right
if (threadIdx.x == blockDim.x - 1)
{
sdata[threadIdx.x * 3 + 3] = lifeData[x + yAbsUp];
sdata[threadIdx.x * 3 + 4] = lifeData[x + yAbs];
sdata[threadIdx.x * 3 + 5] = lifeData[x + yAbsDown];
sdata[threadIdx.x * 3 + 6] = lifeData[xRight + yAbsUp];
sdata[threadIdx.x * 3 + 7] = lifeData[xRight + yAbs];
sdata[threadIdx.x * 3 + 8] = lifeData[xRight + yAbsDown];
}
__syncthreads();
// now we are ready to work.
// go to IF, and check neighbors in SM, and output to global memory.
int currCellLocInSData = 4 + threadIdx.x * 3;
char currCellColor = sdata[currCellLocInSData];
char nextColor = (currCellColor + 1) % colors;
if ((sdata[currCellLocInSData - 4] == nextColor) ||
(sdata[currCellLocInSData - 3] == nextColor) ||
(sdata[currCellLocInSData - 2] == nextColor) ||
(sdata[currCellLocInSData - 1] == nextColor) ||
(sdata[currCellLocInSData + 1] == nextColor) ||
(sdata[currCellLocInSData + 2] == nextColor) ||
(sdata[currCellLocInSData + 3] == nextColor) ||
(sdata[currCellLocInSData + 4] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
/// pointer arithmetic
__global__ void kernel2(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x) {
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
char currCellColor = lifeData[x + yAbs];
char nextColor = (currCellColor + 1) % colors;
if ((*(lifeData + xLeft + yAbsUp) == nextColor) || (*(lifeData + x + yAbsUp) == nextColor) || (*(lifeData + xRight + yAbsUp) == nextColor) || (*(lifeData + xLeft + yAbsDown) == nextColor) ||
(*(lifeData + x + yAbsDown) == nextColor) || (*(lifeData + xRight + yAbsDown) == nextColor) || (*(lifeData + xLeft + yAbs) == nextColor) || (*(lifeData + xRight + yAbs) == nextColor))
{
*(resultLifeData + x + yAbs) = nextColor;
}
else
{
*(resultLifeData + x + yAbs) = currCellColor;
}
}
}
// remove all possible variables (we actually don't need most of them)
__global__ void kernel1(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
#define WORLD_SIZE (worldWidth * worldHeight)
//int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
//cellId < worldSize;
cellId < WORLD_SIZE;
cellId += blockDim.x * gridDim.x) {
#define x (cellId % worldWidth)
// int x = cellId % worldWidth; // x=0
#define yAbs (cellId - x)
// int yAbs = cellId - x; // yabs = 0
#define xLeft ((x + worldWidth - 1) % worldWidth)
// int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
#define xRight ((x + 1) % worldWidth)
// int xRight = (x + 1) % worldWidth; // xright=1
//int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
#define yAbsUp ((yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE)
// int yAbsUp = (yAbs + WORLD_SIZE - worldWidth) % WORLD_SIZE;
//int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
#define yAbsDown ( (yAbs + worldWidth) % WORLD_SIZE)
// int yAbsDown = (yAbs + worldWidth) % WORLD_SIZE; // yabsdown=4
//char currCellColor = lifeData[x + yAbs];
#define currCellColor (lifeData[x + yAbs])
// char nextColor = (currCellColor + 1) % colors;
#define nextColor ((currCellColor + 1) % colors)
if ((lifeData[xLeft + yAbsUp] == nextColor) || (lifeData[x + yAbsUp] == nextColor) || (lifeData[xRight + yAbsUp] == nextColor) || (lifeData[xLeft + yAbsDown] == nextColor) ||
(lifeData[x + yAbsDown] == nextColor) || (lifeData[xRight + yAbsDown] == nextColor) || (lifeData[xLeft + yAbs] == nextColor) || (lifeData[xRight + yAbs] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
#undef x
#undef yAbs
#undef xLeft
#undef xRight
#undef yAbsUp
#undef yAbsDown
#undef currCellColor
#undef nextColor
}
/// naive approach
__global__ void kernel0(char* lifeData, int worldWidth, int worldHeight, char* resultLifeData)
{
int worldSize = worldWidth * worldHeight;
int colors = 16;
//int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
for (int cellId = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
cellId < worldSize;
cellId += blockDim.x * gridDim.x) {
int x = cellId % worldWidth; // x=0
int yAbs = cellId - x; // yabs = 0
int xLeft = (x + worldWidth - 1) % worldWidth; // xleft=3
int xRight = (x + 1) % worldWidth; // xright=1
int yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; // yabsup=12
int yAbsDown = (yAbs + worldWidth) % worldSize; // yabsdown=4
char currCellColor = lifeData[x + yAbs];
char nextColor = (currCellColor + 1) % colors;
if ((lifeData[xLeft + yAbsUp] == nextColor) || (lifeData[x + yAbsUp] == nextColor) || (lifeData[xRight + yAbsUp] == nextColor) || (lifeData[xLeft + yAbsDown] == nextColor) ||
(lifeData[x + yAbsDown] == nextColor) || (lifeData[xRight + yAbsDown] == nextColor) || (lifeData[xLeft + yAbs] == nextColor) || (lifeData[xRight + yAbs] == nextColor))
{
resultLifeData[x + yAbs] = nextColor;
}
else
{
resultLifeData[x + yAbs] = currCellColor;
}
}
}
void reduce(int boardHeight, int boardWidth, int numThreads, int numBlocks, char** d_idata, char** d_odata, int epochs, int kernelId)
{
char* temp;
switch (kernelId)
{
case 0:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel0 << <numBlocks, numThreads>> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 1:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel1 << <numBlocks, numThreads >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 2:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel2 << <numBlocks, numThreads>> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 3:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel3 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 4:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel4 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 5:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel5 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
case 6:
for (size_t i = 0; i < epochs; i++) {
cudaDeviceSynchronize();
kernel6 << <numBlocks, numThreads, 1024 * 3 + 6 >> >(*d_idata, boardHeight, boardWidth, *d_odata);
std::swap(*d_idata, *d_odata);
}
break;
default:
break;
}
checkCudaErrors(cudaDeviceSynchronize());
}
|
56710ba3b6a173906d1b9549a0891ce2e3d9846e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that performs the computations for GENRE on a GPU using single precision
// Define the GPU kernel that performs predictor normalization
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be normalized
int start_ind = 0;
// This if statement makes sure to not normalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Normalize each predictor column so that the sum of the square of each predictor column is equal to 1
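// Equivalently, X[:, j] <- X[:, j] / ||X[:, j]||_2, so that each scaled column has a unit sum of squares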
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the square of the predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_squared = sum_squared + (X_value * X_value);
}
// Calculate the square root of the sum of the square of the predictor column
float square_root_sum_squared = sqrtf(sum_squared);
// Store the square root of the sum of the square of the predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = square_root_sum_squared;
// Normalize the predictor column by dividing each observation in the predictor column by the square root of the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] / square_root_sum_squared;
}
}
// This if statement stores a scaling factor of 1 for the predictor column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
}
}
}
// Define the GPU kernel that performs predictor standardization
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be standardized
int start_ind = 0;
// This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Standardize each predictor column by subtracting the mean of the predictor column from each observation and dividing each observation by the standard deviation of the predictor column
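// Equivalently, X[:, j] <- (X[:, j] - mean_j) / std_j, where std_j is the population (divide-by-N) standard deviation computed below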
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the predictor column
float sum_value = 0.0f;
// Calculate the sum of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_value = sum_value + X_value;
}
// Calculate the mean of the predictor column
float mean_value = sum_value / (float)num_observations;
// Store the mean of the predictor column
mean_X_matrix_d[predictor_thread_stride + predictor_column] = mean_value;
// Declare and initialize the variable that stores the sum of the square of the demeaned predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the demeaned predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value_demeaned = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value;
sum_squared = sum_squared + (X_value_demeaned * X_value_demeaned);
}
// Calculate the standard deviation of the demeaned predictor column
float std = sqrtf(sum_squared / (float)num_observations);
// Store the standard deviation of the demeaned predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = std;
// Standardize the predictor column by subtracting its mean and dividing by its standard deviation
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = (X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value) / std;
}
}
// This if statement stores a scaling factor of 1 and a mean of 1 for the first column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
mean_X_matrix_d[predictor_thread_stride] = 1.0f;
}
}
}
// Define the GPU kernel that calculates the standard deviations for each portion of the y_d array, standardizes the y_d array, and calculates the standardized lambda values
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations in the y_d array for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Declare and initialize the variable that stores the running sum of y for the fit
float sum_value = 0.0f;
// Calculate the running sums for sum_value
for (int observation = 0; observation < num_observations; observation++) {
float value = y_d[observation_thread_stride + observation];
sum_value += value;
}
// Calculate the mean of y for the fit
float mean = sum_value / (float)num_observations;
// Declare and initialize the variable that stores the standard deviation of y for the fit
float std = 0.0f;
// Calculate the standard deviation of y for the fit
for (int observation = 0; observation < num_observations; observation++) {
float value_2 = y_d[observation_thread_stride + observation];
std += ((value_2 - mean) * (value_2 - mean));
}
std = sqrtf(std / (float)num_observations);
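// (this is the population standard deviation: the sum of squared deviations is divided by N rather than N - 1)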
// Store the standard deviation of y for the fit in the y_std_d array
y_std_d[fit_ind] = std;
// This if statement standardizes the lambda values and the y data if the standard deviation isn't 0
if (std != 0.0f) {
// Set the model fit flag to 1 if the standard deviation is not 0 and a model fit should be performed
model_fit_flag_d[fit_ind] = 1.0f;
// Calculate the standardized lambda value and store it into the standardized_lambda_values_d array
standardized_lambda_values_d[fit_ind] = standardized_lambda_values_d[fit_ind] / std;
// Standardize y for the fit and store it into the y_d array and the residual_y_d array
for (int observation = 0; observation < num_observations; observation++) {
float standardized_value = y_d[observation_thread_stride + observation] / std;
y_d[observation_thread_stride + observation] = standardized_value;
residual_y_d[observation_thread_stride + observation] = standardized_value;
}
}
}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
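// For reference, the coordinate-descent updates below correspond to the usual elastic-net objective with unit observation weights:
//   (1/(2*N)) * sum_i (y_i - x_i'B)^2 + lambda * (alpha * ||B||_1 + ((1 - alpha)/2) * ||B||_2^2),
// where N is num_observations and p_j below is the per-observation correlation of predictor j with the partial residual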
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficient value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
// Store the updated residual value back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
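// Soft-threshold: S(p_j, gamma) = sign(p_j) * max(|p_j| - gamma, 0), with gamma = lambda * alpha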
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
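// i.e., B_j = S(p_j, lambda * alpha) / (mean(x_j^2) + lambda * (1 - alpha))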
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data, storing the per-fit residual values in shared memory
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
extern __shared__ float sdata[];
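// Each thread's residuals are interleaved across the block (index = observation_row * num_threads_per_block + block_thread_ind),
// so for a given observation_row the threads of a warp touch adjacent shared-memory locations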
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Store the residual values for the fit into the shared memory array
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
}
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficient value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
// Store the updated residual value back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs predictor coefficient unnormalization
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether a model fit was performed or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that the coefficients are unnormalized only if a model fit was performed
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be unnormalized
int start_ind = 0;
// This if statement makes sure to not unnormalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Unnormalize the predictor coefficients
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
B_d[predictor_thread_stride + predictor_column] = B_d[predictor_thread_stride + predictor_column] / scaling_factors_d[predictor_thread_stride + predictor_column];
}
}
}
}
// Define the GPU kernel that performs predictor coefficient unstandardization
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether a model fit was performed or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that the coefficients are unstandardized only if a model fit was performed
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be unstandardized
int start_ind = 0;
// This if statement makes sure to not unstandardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Declare and initialize the variable that is used to adjust the intercept term if it is included
float sum_value = 0.0f;
// Perform predictor coefficient unstandardization
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
float B_unstandardized = B_d[predictor_thread_stride + predictor_column] / scaling_factors_d[predictor_thread_stride + predictor_column];
B_d[predictor_thread_stride + predictor_column] = B_unstandardized;
sum_value = sum_value + (B_unstandardized * mean_X_matrix_d[predictor_thread_stride + predictor_column]);
}
// Adjust the intercept term if it is included
if (intercept_flag == 1) {
B_d[predictor_thread_stride] = B_d[predictor_thread_stride] - sum_value;
}
}
}
}
| 56710ba3b6a173906d1b9549a0891ce2e3d9846e.cu | // Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that allows for performing the computations
// for GENRE on a GPU using single precision
// Define the GPU kernel that performs predictor normalization
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be normalized
int start_ind = 0;
// This if statement makes sure to not normalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Normalize each predictor column so that the sum of the square of each predictor column is equal to 1
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the square of the predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_squared = sum_squared + (X_value * X_value);
}
// Calculate the square root of the sum of the square of the predictor column
float square_root_sum_squared = sqrtf(sum_squared);
// Store the square root of the sum of the square of the predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = square_root_sum_squared;
// Normalize the predictor column by dividing each observation in the predictor column by the square root of the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] / square_root_sum_squared;
}
}
// This if statement stores a scaling factor of 1 for the predictor column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
}
}
}
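// After this kernel runs, every non-intercept predictor column of each model matrix has a unit L2 norm, and the stored scaling factors are what the predictor_coefficient_unnormalization kernel later divides by in order to map the fitted coefficients back to the original predictor scale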
// Define the GPU kernel that performs predictor standardization
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be standardized
int start_ind = 0;
// This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Standardize each predictor column by subtracting the mean of the predictor column from each observation and dividing each observation by the standard deviation of the predictor column
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the predictor column
float sum_value = 0.0f;
// Calculate the sum of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_value = sum_value + X_value;
}
// Calculate the mean of the predictor column
float mean_value = sum_value / (float)num_observations;
// Store the mean of the predictor column
mean_X_matrix_d[predictor_thread_stride + predictor_column] = mean_value;
// Declare and initialize the variable that stores the sum of the square of the demeaned predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the demeaned predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value_demeaned = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value;
sum_squared = sum_squared + (X_value_demeaned * X_value_demeaned);
}
// Calculate the standard deviation of the demeaned predictor column
float std = sqrtf(sum_squared / (float)num_observations);
// Store the standard deviation of the demeaned predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = std;
// Standardize the predictor column by subtracting its mean and dividing by its standard deviation
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = (X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value) / std;
}
}
// This if statement stores a scaling factor of 1 and a mean of 1 for the first column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
mean_X_matrix_d[predictor_thread_stride] = 1.0f;
}
}
}
// Define the GPU kernel that calculates the standard deviations for each portion of the y_d array, standardizes the y_d array, and calculates the standardized lambda values
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations in the cropped_y_d array for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Declare and initialize the variable that stores the running sum of y for the fit
float sum_value = 0.0f;
// Calculate the running sums for sum_value
for (int observation = 0; observation < num_observations; observation++) {
float value = y_d[observation_thread_stride + observation];
sum_value += value;
}
// Calculate the mean of y for the fit
float mean = sum_value / (float)num_observations;
// Declare and initialize the variable that stores the standard deviation of y for the fit
float std = 0.0f;
// Calculate the standard deviation of y for the fit
for (int observation = 0; observation < num_observations; observation++) {
float value_2 = y_d[observation_thread_stride + observation];
std += ((value_2 - mean) * (value_2 - mean));
}
std = sqrtf(std / (float)num_observations);
// Store the standard deviation of y for the fit in the y_std_d array
y_std_d[fit_ind] = std;
// This if statement standardizes the lambda values and the y data if the standard deviation isn't 0
if (std != 0.0f) {
// Set the model fit flag to 1 if the standard deviation is not 0 and a model fit should be performed
model_fit_flag_d[fit_ind] = 1.0f;
// Calculate the standardized lambda value and store it into the standardized_lambda_d array
standardized_lambda_values_d[fit_ind] = standardized_lambda_values_d[fit_ind] / std;
// Standardize y for the fit and store it into the y_d array and the residual_y_d array
for (int observation = 0; observation < num_observations; observation++) {
float standardized_value = y_d[observation_thread_stride + observation] / std;
y_d[observation_thread_stride + observation] = standardized_value;
residual_y_d[observation_thread_stride + observation] = standardized_value;
}
}
}
}
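// Fits whose y has a standard deviation of zero are skipped entirely because their model fit flag is never set to 1, which means that the model fit and coefficient rescaling kernels below leave their coefficients untouched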
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent (it is initialized to a large value so that the while loop below runs at least once)
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficient value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
// Store the updated residual value back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
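// The following device function is a minimal sketch (it is not part of the original GENRE code) of the per-coefficient update that the loop in the model_fit kernel implements, assuming unit observation weights: the updated coefficient is B_j = S(p_j, lambda * alpha) / (mean_squared_predictor_value + lambda * (1.0f - alpha)), where S(z, g) = sign(z) * max(|z| - g, 0) is the soft-thresholding operator shown below (the intercept term, which is j == 0 when intercept_flag == 1, bypasses the thresholding and uses p_j directly)
__device__ static float soft_threshold_sketch(float z, float g) {
// Shrink z toward zero by g and set it exactly to zero when the magnitude of z does not exceed g
if (z > 0.0f && g < fabsf(z)) {
return z - g;
} else if (z < 0.0f && g < fabsf(z)) {
return z + g;
} else {
return 0.0f;
}
}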
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
extern __shared__ float sdata[];
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent (it is initialized to a large value so that the while loop below runs at least once)
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Store the residual values for the fit into the shared memory array
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
}
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficient value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
// Store the updated residual value back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
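// The following host function is a minimal launch sketch (it is not part of the original GENRE host code, and max_num_observations is an assumed parameter that holds the largest observation count among the fits in the chunk) that illustrates the dynamic shared memory size implied by the indexing above: each of the num_threads_per_block threads keeps num_observations residuals interleaved at a stride of num_threads_per_block, so a block needs num_threads_per_block * max_num_observations floats of shared memory
static void model_fit_shared_memory_launch_sketch(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block, int max_num_observations) {
// Calculate the amount of dynamic shared memory in bytes that is requested as the third launch configuration argument
size_t shared_memory_bytes = (size_t)num_threads_per_block * (size_t)max_num_observations * sizeof(float);
model_fit_shared_memory<<<num_blocks, num_threads_per_block, shared_memory_bytes>>>(B_d, B_thread_stride_d, model_fit_flag_d, X_matrix_d, X_matrix_thread_stride_d, observation_thread_stride_d, residual_y_d, y_std_d, standardized_lambda_values_d, num_observations_d, num_predictors_d, alpha_values_d, tolerance_values_d, max_iterations_values_d, intercept_flag_d, transformation_flag, num_threads_per_block, num_blocks, num_threads_last_block);
}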
// Define the GPU kernel that performs predictor coefficient unnormalization
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether a model fit was performed or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that the coefficients are unnormalized only if a model fit was performed
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be unnormalized
int start_ind = 0;
// This if statement makes sure to not unnormalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Unnormalize the predictor coefficients
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
B_d[predictor_thread_stride + predictor_column] = B_d[predictor_thread_stride + predictor_column] / scaling_factors_d[predictor_thread_stride + predictor_column];
}
}
}
}
// Define the GPU kernel that performs predictor coefficient unstandardization
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether a model fit was performed or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that the coefficients are unstandardized only if a model fit was performed
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be unstandardized
int start_ind = 0;
// This if statement makes sure to not unstandardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Declare and initialize the variable that is used to adjust the intercept term if it is included
float sum_value = 0.0f;
// Perform predictor coefficient unstandardization
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
float B_unstandardized = B_d[predictor_thread_stride + predictor_column] / scaling_factors_d[predictor_thread_stride + predictor_column];
B_d[predictor_thread_stride + predictor_column] = B_unstandardized;
sum_value = sum_value + (B_unstandardized * mean_X_matrix_d[predictor_thread_stride + predictor_column]);
}
// Adjust the intercept term if it is included
if (intercept_flag == 1) {
B_d[predictor_thread_stride] = B_d[predictor_thread_stride] - sum_value;
}
}
}
}
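// In the unstandardization above, each standardized predictor column satisfies x_standardized = (x - mean_j) / std_j, so a coefficient that was fit on the standardized scale maps back to the original scale as B_j = B_j_standardized / std_j, and the intercept term has to be shifted down by the sum of B_j * mean_j over the non-intercept predictors, which is exactly what the loop and the final intercept adjustment implement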
|
8d11671518fc1478f6a50c171ac00ec4429ac16c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define epsilon 0.00001
#define maxRep 40
__global__ void meanShift(double *x,size_t pitchx,double *y, size_t pitchy,double *ynew,size_t pitchynew,int N,int d,double sigma);
__device__ double calcDist(double *y,size_t pitchy,double *x,size_t pitchx,int d);
__device__ double gausK(double x,double sigma);
double froNorm(double *a,size_t pitcha,double *b,size_t pitchb,int N,int d);
void test(double *y,size_t pitchy,char *testfile,int N,int d);
int main(int argc,char **argv){
if(argc!=4){
printf("Usage: %s (dataset) (test) (sigma) where (dataset) ",argv[0]);
printf("is the name of the dataset .txt file, (test) is the name of the ");
printf(".txt test file and (sigma) is the value of sigma for the current dataset\n");
exit(1);
}
struct timeval startwtime, endwtime;
double time;
//turn (sigma) input from string to double
double sigma=atof(argv[3]);
int i,j;
//argv[1] is the (dataset) file
FILE *file = fopen(argv[1], "r");
if(file==NULL){
printf("Couldn't open %s\n",argv[1]);
exit(1);
}
//count the number of points and dimensions of (dataset)
int d=0,N=0;
char ch;
/**dimension and number of points counting found in
*https://www.opentechguides.com/how-to/article/c/72/c-file-counts.html
*/
while ((ch=getc(file)) != EOF) {
if ((ch == ' ' || ch == '\n') && N==0) { ++d; }
if (ch == '\n') { ++N; }
}
//1 dimension host memory allocation to fit hipMemcpy2D
double *y;
size_t pitchy = sizeof(double) * d;
y = (double*)malloc(sizeof(double) * N * d);
double *ynew;
size_t pitchynew = sizeof(double) * d;
ynew = (double*)calloc(N * d, sizeof(double)); //zero-initialize ynew so the first convergence check in the loop below reads defined values
double *x;
size_t pitchx = sizeof(double) * d;
x = (double*)malloc(sizeof(double) * N * d);
double *row_x,*row_y;
//return file pointer to the beginning of the file
fseek(file, 0, SEEK_SET);
for (i=0;i<N;i++){
row_x = (double*)((char*)x + i * pitchx );
row_y = (double*)((char*)y + i * pitchy );
for (j=0;j<d;j++){
fscanf(file,"%lf",&row_x[j]);
row_y[j]=row_x[j];
}
}
fclose(file);
//allocate 2d arrays for device memory
double *d_x;
double *d_y;
double *d_ynew;
size_t d_pitchx,d_pitchy,d_pitchynew;
hipMallocPitch((void**)&d_x, &d_pitchx, d * sizeof(double), N);
hipMallocPitch((void**)&d_y, &d_pitchy, d * sizeof(double), N);
hipMallocPitch((void**)&d_ynew, &d_pitchynew, d * sizeof(double), N);
//copy data from host to device memory
hipMemcpy2D(d_x,d_pitchx,x,pitchx, d * sizeof(double), N, hipMemcpyHostToDevice);
hipMemcpy2D(d_y,d_pitchy,y,pitchy, d * sizeof(double), N, hipMemcpyHostToDevice);
int repeats=0;
double norm;
double *row_ynew;
gettimeofday (&startwtime, NULL);
do{
//each of the d threads in a block keeps its own d-element copy of y in shared memory, so the block needs d*d doubles
hipLaunchKernelGGL(( meanShift), dim3(N),dim3(d),d*d*sizeof(double), 0, d_x,d_pitchx,d_y,d_pitchy,d_ynew,d_pitchynew,N,d,sigma);
hipMemcpy2D(y, sizeof(double)*d, d_y, d_pitchy, sizeof(double) * d, N, hipMemcpyDeviceToHost);
//calculate norm of (ynew-y)
norm = froNorm(y,pitchy,ynew,pitchynew,N,d);
//update ynew after a meanshift iteration
for (i=0;i<N ;i++){
row_ynew = (double*)((char*)ynew + i * pitchynew);
row_y = (double*)((char*)y +i * pitchy);
for (j=0;j<d;j++){
row_ynew[j] = row_y[j];
}
}
repeats++;
}while(norm>epsilon && repeats<maxRep);
gettimeofday (&endwtime, NULL);
time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("Wall clock time: %f \n", time);
//argv[2] is the (testfile) name
test(y,pitchy,argv[2],N,d);
return 0;
}
__global__ void meanShift(double *x,size_t pitchx,double *y, size_t pitchy,double *ynew,size_t pitchynew,int N,int d,double sigma){
int index=blockDim.x*blockIdx.x+threadIdx.x;
int id=threadIdx.x;
extern __shared__ double sy[];
if (index<N){
double sum=0,res=0;
int j,k;
double* row_y=(double*)((char*)y+index*pitchy);
double* row_ynew=(double*)((char*)ynew+index*pitchynew);
//copy y to shared memory
for(k=0;k<d;k++)
sy[id*d+k]=row_y[k];
__syncthreads();
//initialize ynew
for(k=0;k<d;k++)
row_ynew[k]=0;
for(j=0;j<N;j++){
double* row_x=(double*)((char*)x+j*pitchx);
if(calcDist(&sy[id*d],pitchy,row_x,pitchx,d)<sigma*sigma){
double temp=0;
for(k=0;k<d;k++){
temp=(sy[id*d+k]-row_x[k])*(sy[id*d+k]-row_x[k])+temp;
//temp is the square of norm2(y_i-x_j)
}
res=gausK(temp,sigma);
for(k=0;k<d;k++){
row_ynew[k]=row_ynew[k]+row_x[k]*res;
}
sum=sum+res;
//calculating denominator of ynew_i
}
}
for(k=0;k<d;k++){
row_ynew[k]=row_ynew[k]/sum;
}
//update y from all threads
for(k=0;k<d;k++){
row_y[k]=row_ynew[k];
}
}
}
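//one call to meanShift performs a single mean-shift step for every point:
// y_i <- sum_j K(||y_i - x_j||^2) * x_j / sum_j K(||y_i - x_j||^2)
//where K(t) = exp(-t/(2*sigma^2)) is the Gaussian kernel computed by gausK and the sums only include the points x_j that pass the calcDist(...) < sigma*sigma cutoff; the host loop in main repeats this step until the Frobenius norm of the change drops below epsilon or maxRep iterations are reached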
//calculate distance between x and y
__device__ double calcDist(double *y,size_t pitchy,double *x,size_t pitchx,int d){
double sum = 0;
int l;
for (l=0;l<d;l++){
sum = sum + (y[l]-x[l])*(y[l]-x[l]);
}
return sqrt(sum);
}
__device__ double gausK(double x,double sigma){
double f;
f = exp(-x/(2*(sigma*sigma)));
return f;
}
//calculate frobenius norm of (a-b)
double froNorm(double *a,size_t pitcha,double *b,size_t pitchb,int N,int d){
int i,j;
double sum=0;
double *row_b,*row_a;
for (i=0;i<N;i++){
row_a = (double*)((char*)a + i * pitcha);
row_b = (double*)((char*)b + i * pitchb);
for (j=0;j<d;j++){
sum = sum + (row_a[j]-row_b[j])*(row_a[j]-row_b[j]);
}
}
return sqrt(sum);
}
void test(double *y,size_t pitchy,char *testfile,int N,int d){
int i,j;
double **test;
//memory allocation for test input
test =(double **) malloc(sizeof(double*)*N);
for (i=0;i<N;i++){
test[i] = (double *)malloc(sizeof(double)*d);
}
FILE *file = fopen(testfile, "r");
if(file==NULL){
printf("Couldn't open %s\n",testfile);
exit(1);
}
for (i=0;i<N;i++){
for (j=0;j<d;j++){
fscanf(file,"%lf",&test[i][j]);
}
}
//compare the arrays
int failed=0;
for (i=0;i<N;i++){
double* row_y=(double*)((char*)y+i*pitchy);
for (j=0;j<d;j++){
//check if relative error to matlab output is small
if (fabs(row_y[j]-(double)test[i][j])/fabs((double)test[i][j]) > 0.1)
failed++;
}
}
//fail the test if less than 90% of the values match the reference output
if((double)(d*N-failed)/(double)(d*N)*100<90.0)
printf("Test failed!\n");
else
printf("Test passed!\n");
fclose(file);
}
| 8d11671518fc1478f6a50c171ac00ec4429ac16c.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#define epsilon 0.00001
#define maxRep 40
__global__ void meanShift(double *x,size_t pitchx,double *y, size_t pitchy,double *ynew,size_t pitchynew,int N,int d,double sigma);
__device__ double calcDist(double *y,size_t pitchy,double *x,size_t pitchx,int d);
__device__ double gausK(double x,double sigma);
double froNorm(double *a,size_t pitcha,double *b,size_t pitchb,int N,int d);
void test(double *y,size_t pitchy,char *testfile,int N,int d);
int main(int argc,char **argv){
if(argc!=4){
printf("Usage: %s (dataset) (test) (sigma) where (dataset) ",argv[0]);
printf("is the name of the dataset .txt file, (test) is the name of the ");
printf(".txt test file and (sigma) is the value of sigma for the current dataset\n");
exit(1);
}
struct timeval startwtime, endwtime;
double time;
//turn (sigma) input from string to double
double sigma=atof(argv[3]);
int i,j;
//argv[1] is the (dataset) file
FILE *file = fopen(argv[1], "r");
if(file==NULL){
printf("Couldn't open %s\n",argv[1]);
exit(1);
}
//count the number of points and dimensions of (dataset)
int d=0,N=0;
char ch;
/**dimension and number of points counting found in
*https://www.opentechguides.com/how-to/article/c/72/c-file-counts.html
*/
while ((ch=getc(file)) != EOF) {
if ((ch == ' ' || ch == '\n') && N==0) { ++d; }
if (ch == '\n') { ++N; }
}
//1 dimension host memory allocation to fit cudaMemcpy2D
double *y;
size_t pitchy = sizeof(double) * d;
y = (double*)malloc(sizeof(double) * N * d);
double *ynew;
size_t pitchynew = sizeof(double) * d;
ynew = (double*)malloc(sizeof(double) * N * d);
double *x;
size_t pitchx = sizeof(double) * d;
x = (double*)malloc(sizeof(double) * N * d);
double *row_x,*row_y;
//return file pointer to the beginning of the file
fseek(file, 0, SEEK_SET);
for (i=0;i<N;i++){
row_x = (double*)((char*)x + i * pitchx );
row_y = (double*)((char*)y + i * pitchy );
for (j=0;j<d;j++){
fscanf(file,"%lf",&row_x[j]);
row_y[j]=row_x[j];
}
}
fclose(file);
//allocate 2d arrays for device memory
double *d_x;
double *d_y;
double *d_ynew;
size_t d_pitchx,d_pitchy,d_pitchynew;
cudaMallocPitch((void**)&d_x, &d_pitchx, d * sizeof(double), N);
cudaMallocPitch((void**)&d_y, &d_pitchy, d * sizeof(double), N);
cudaMallocPitch((void**)&d_ynew, &d_pitchynew, d * sizeof(double), N);
//copy data from host to device memory
cudaMemcpy2D(d_x,d_pitchx,x,pitchx, d * sizeof(double), N, cudaMemcpyHostToDevice);
cudaMemcpy2D(d_y,d_pitchy,y,pitchy, d * sizeof(double), N, cudaMemcpyHostToDevice);
int repeats=0;
double norm;
double *row_ynew;
gettimeofday (&startwtime, NULL);
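//repeat mean-shift iterations until the update norm drops below epsilon or maxRep is reached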
do{
meanShift<<<N,d,d*sizeof(double)>>>(d_x,d_pitchx,d_y,d_pitchy,d_ynew,d_pitchynew,N,d,sigma);
cudaMemcpy2D(y, sizeof(double)*d, d_y, d_pitchy, sizeof(double) * d, N, cudaMemcpyDeviceToHost);
//calculate norm of (ynew-y)
norm = froNorm(y,pitchy,ynew,pitchynew,N,d);
//update ynew after a meanshift iteration
for (i=0;i<N ;i++){
row_ynew = (double*)((char*)ynew + i * pitchynew);
row_y = (double*)((char*)y +i * pitchy);
for (j=0;j<d;j++){
row_ynew[j] = row_y[j];
}
}
repeats++;
}while(norm>epsilon && repeats<maxRep);
gettimeofday (&endwtime, NULL);
time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("Wall clock time: %f \n", time);
//argv[2] is the (testfile) name
test(y,pitchy,argv[2],N,d);
return 0;
}
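//one mean-shift iteration: every thread with index<N moves point y[index] toward the
//Gaussian-weighted mean of the x points accepted by the calcDist threshold below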
__global__ void meanShift(double *x,size_t pitchx,double *y, size_t pitchy,double *ynew,size_t pitchynew,int N,int d,double sigma){
int index=blockDim.x*blockIdx.x+threadIdx.x;
int id=threadIdx.x;
extern __shared__ double sy[];
if (index<N){
double sum=0,res=0;
int j,k;
double* row_y=(double*)((char*)y+index*pitchy);
double* row_ynew=(double*)((char*)ynew+index*pitchynew);
//copy y to shared memory
for(k=0;k<d;k++)
sy[id*d+k]=row_y[k];
__syncthreads();
//initialize ynew
for(k=0;k<d;k++)
row_ynew[k]=0;
for(j=0;j<N;j++){
double* row_x=(double*)((char*)x+j*pitchx);
if(calcDist(sy,pitchy,row_x,pitchx,d)<sigma*sigma){
double temp=0;
for(k=0;k<d;k++){
temp=(sy[k]-row_x[k])*(sy[k]-row_x[k])+temp;
//temp is the square of norm2(y_i-x_j)
}
res=gausK(temp,sigma);
for(k=0;k<d;k++){
row_ynew[k]=row_ynew[k]+row_x[k]*res;
}
sum=sum+res;
//calculating denominator of ynew_i
}
}
for(k=0;k<d;k++){
row_ynew[k]=row_ynew[k]/sum;
}
//update y from all threads
for(k=0;k<d;k++){
row_y[k]=row_ynew[k];
}
}
}
//calculate distance between x and y
__device__ double calcDist(double *y,size_t pitchy,double *x,size_t pitchx,int d){
double sum = 0;
int l;
for (l=0;l<d;l++){
sum = sum + (y[l]-x[l])*(y[l]-x[l]);
}
return sqrt(sum);
}
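//Gaussian kernel weight: takes the squared distance x and returns exp(-x/(2*sigma^2))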
__device__ double gausK(double x,double sigma){
double f;
f = exp(-x/(2*(sigma*sigma)));
return f;
}
//calculate frobenius norm of (a-b)
double froNorm(double *a,size_t pitcha,double *b,size_t pitchb,int N,int d){
int i,j;
double sum=0;
double *row_b,*row_a;
for (i=0;i<N;i++){
row_a = (double*)((char*)a + i * pitcha);
row_b = (double*)((char*)b + i * pitchb);
for (j=0;j<d;j++){
sum = sum + (row_a[j]-row_b[j])*(row_a[j]-row_b[j]);
}
}
return sqrt(sum);
}
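//compare the computed y against the reference (MATLAB) output stored in testfile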
void test(double *y,size_t pitchy,char *testfile,int N,int d){
int i,j;
double **test;
//memory allocation for test input
test =(double **) malloc(sizeof(double*)*N);
for (i=0;i<N;i++){
test[i] = (double *)malloc(sizeof(double)*d);
}
FILE *file = fopen(testfile, "r");
if(file==NULL){
printf("Couldn't open %s\n",testfile);
exit(1);
}
for (i=0;i<N;i++){
for (j=0;j<d;j++){
fscanf(file,"%lf",&test[i][j]);
}
}
//compare the arrays
int failed=0;
for (i=0;i<N;i++){
double* row_y=(double*)((char*)y+i*pitchy);
for (j=0;j<d;j++){
//check if relative error to matlab output is small
if (fabs(row_y[j]-(double)test[i][j])/fabs((double)test[i][j]) > 0.1)
failed++;
}
}
//the test passes only if at least 90% of the values match the reference
if((double)(d*N-failed)/(double)(d*N)*100<90.0)
printf("Test failed!\n");
else
printf("Test passed!\n");
fclose(file);
}
|
9521839761facaad3fa98c0fb042bc6fb6cf81e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ns.hpp"
namespace NS {
__constant__ double _rho;
__constant__ double _nu;
__constant__ double _dx;
__constant__ double _dy;
__constant__ double _dt;
__constant__ int _nx;
__constant__ int _ny;
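// solver parameters kept in GPU constant memory; initialized once from the host via initParams()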
void initParams(
const double rho,
const double nu,
const double dx,
const double dy,
const double dt,
const int nx,
const int ny
)
{
hipMemcpyToSymbol(_rho, &rho, sizeof(double));
hipMemcpyToSymbol(_nu, &nu, sizeof(double));
hipMemcpyToSymbol(_dx, &dx, sizeof(double));
hipMemcpyToSymbol(_dy, &dy, sizeof(double));
hipMemcpyToSymbol(_dt, &dt, sizeof(double));
hipMemcpyToSymbol(_nx, &nx, sizeof(int));
hipMemcpyToSymbol(_ny, &ny, sizeof(int));
}
//
// solver
//
namespace Solver {
//
// calculation of velocity
//
void calcVel(
double *un,
double *vn,
const double *u,
const double *v,
const double *p,
const dim3 &grid,
const dim3 &block
)
{
hipLaunchKernelGGL(( calcVelKernel), dim3(grid), dim3(block), 0, 0, un, vn, u, v, p);
hipDeviceSynchronize();
}
__global__ void calcVelKernel(
double *un,
double *vn,
const double *u,
const double *v,
const double *p
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
const double u_c = u[idx];
const double u_xp = u[idx+1];
const double u_xm = u[idx-1];
const double u_yp = u[idx+_nx];
const double u_ym = u[idx-_nx];
const double v_c = v[idx];
const double v_xp = v[idx+1];
const double v_xm = v[idx-1];
const double v_yp = v[idx+_nx];
const double v_ym = v[idx-_nx];
// advection(convection) term
const double adv_x = - u_c*_dt/_dx*(u_c-u_xm) - v_c*_dt/_dy*(u_c-u_ym);
const double adv_y = - u_c*_dt/_dx*(v_c-v_xm) - v_c*_dt/_dy*(v_c-v_ym);
// pressure term
const double p_x = - _dt/(2.0*_rho*_dx)*(p[idx+1]-p[idx-1]);
const double p_y = - _dt/(2.0*_rho*_dy)*(p[idx+_nx]-p[idx-_nx]);
// diffusion term
const double diff_x = _nu*(_dt/sq(_dx)*(u_xp-2*u_c+u_xm) +
_dt/sq(_dy)*(u_yp-2*u_c+u_ym));
const double diff_y = _nu*(_dt/sq(_dx)*(v_xp-2*v_c+v_xm) +
_dt/sq(_dy)*(v_yp-2*v_c+v_ym));
un[idx] = u[idx] + adv_x + p_x + diff_x;
vn[idx] = v[idx] + adv_y + p_y + diff_y;
}
//
// calculation of source term: b
//
void calcSource(
double *b,
const double *u,
const double *v,
const dim3 &grid,
const dim3 &block
)
{
hipLaunchKernelGGL(( calcSourceKernel), dim3(grid), dim3(block), 0, 0, b, u, v);
hipDeviceSynchronize();
}
__global__ void calcSourceKernel(
double *b,
const double *u,
const double *v
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
b[idx] = _rho*(1/_dt*((u[idx+1]-u[idx-1])/(2.0*_dx) + (v[idx+_nx]-v[idx-_nx])/(2.0*_dy))
- sq((u[idx+1]-u[idx-1])/(2.0*_dx))
- 2.0*(u[idx+_nx]-u[idx-_nx])/(2.0*_dy)*(v[idx+1]-v[idx-1])/(2.0*_dx)
- sq((v[idx+_nx]-v[idx-_nx])/(2.0*_dy)));
}
//
// calculation of Poisson eq. for pressure
//
void calcPress(
double *pn,
const double *p,
const double *b,
const dim3 &grid,
const dim3 &block
)
{
hipLaunchKernelGGL(( calcPressKernel), dim3(grid), dim3(block), 0, 0, pn, p, b);
hipDeviceSynchronize();
}
__global__ void calcPressKernel(
double *pn,
const double *p,
const double *b
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
pn[idx] = ((p[idx+1]+p[idx-1])*sq(_dy) +
(p[idx+_nx]+p[idx-_nx])*sq(_dx)) /
(2.0*(sq(_dx)+sq(_dy))) -
sq(_dx)*sq(_dy)/(2.0*(sq(_dx)+sq(_dy))) *
b[idx];
}
} // namespace Solver
//
// boundary conditions
//
namespace BC {
void cavityFlow(
double *u,
double *v,
dim3 &grid,
dim3 &block
)
{
hipLaunchKernelGGL(( cavityFlowKernel), dim3(grid), dim3(block), 0, 0, u, v);
hipDeviceSynchronize();
}
__global__ void cavityFlowKernel(double *u, double *v)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int idx = i + _nx*j;
// left
if (i == 0) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// right
if (i == _nx-1) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// bottom
if (j == 0) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// top
if (j == _ny-1) {
u[idx] = 1.0;
v[idx] = 0.0;
}
}
void neumann(
double *p,
dim3 &grid,
dim3 &block
)
{
hipLaunchKernelGGL(( neumannKernel), dim3(grid), dim3(block), 0, 0, p);
hipDeviceSynchronize();
}
__global__ void neumannKernel(double *p)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int idx = i + _nx*j;
// left
if (i == 0) {
p[idx] = p[1+_nx*j];
}
// right
if (i == _nx-1) {
p[idx] = p[_nx-2+_nx*j];
}
// bottom
if (j == 0) {
p[idx] = p[i+_nx*1];
}
// top
if (j == _ny-1) {
p[idx] = 0.0;
}
}
} // namespace BC
} // namespace NS
| 9521839761facaad3fa98c0fb042bc6fb6cf81e7.cu | #include "ns.hpp"
namespace NS {
__constant__ double _rho;
__constant__ double _nu;
__constant__ double _dx;
__constant__ double _dy;
__constant__ double _dt;
__constant__ int _nx;
__constant__ int _ny;
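// solver parameters kept in GPU constant memory; initialized once from the host via initParams()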
void initParams(
const double rho,
const double nu,
const double dx,
const double dy,
const double dt,
const int nx,
const int ny
)
{
cudaMemcpyToSymbol(_rho, &rho, sizeof(double));
cudaMemcpyToSymbol(_nu, &nu, sizeof(double));
cudaMemcpyToSymbol(_dx, &dx, sizeof(double));
cudaMemcpyToSymbol(_dy, &dy, sizeof(double));
cudaMemcpyToSymbol(_dt, &dt, sizeof(double));
cudaMemcpyToSymbol(_nx, &nx, sizeof(int));
cudaMemcpyToSymbol(_ny, &ny, sizeof(int));
}
//
// solver
//
namespace Solver {
//
// calculation of velocity
//
void calcVel(
double *un,
double *vn,
const double *u,
const double *v,
const double *p,
const dim3 &grid,
const dim3 &block
)
{
calcVelKernel<<<grid, block>>>(un, vn, u, v, p);
cudaDeviceSynchronize();
}
__global__ void calcVelKernel(
double *un,
double *vn,
const double *u,
const double *v,
const double *p
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
const double u_c = u[idx];
const double u_xp = u[idx+1];
const double u_xm = u[idx-1];
const double u_yp = u[idx+_nx];
const double u_ym = u[idx-_nx];
const double v_c = v[idx];
const double v_xp = v[idx+1];
const double v_xm = v[idx-1];
const double v_yp = v[idx+_nx];
const double v_ym = v[idx-_nx];
// advection(convection) term
const double adv_x = - u_c*_dt/_dx*(u_c-u_xm) - v_c*_dt/_dy*(u_c-u_ym);
const double adv_y = - u_c*_dt/_dx*(v_c-v_xm) - v_c*_dt/_dy*(v_c-v_ym);
// pressure term
const double p_x = - _dt/(2.0*_rho*_dx)*(p[idx+1]-p[idx-1]);
const double p_y = - _dt/(2.0*_rho*_dy)*(p[idx+_nx]-p[idx-_nx]);
// diffusion term
const double diff_x = _nu*(_dt/sq(_dx)*(u_xp-2*u_c+u_xm) +
_dt/sq(_dy)*(u_yp-2*u_c+u_ym));
const double diff_y = _nu*(_dt/sq(_dx)*(v_xp-2*v_c+v_xm) +
_dt/sq(_dy)*(v_yp-2*v_c+v_ym));
un[idx] = u[idx] + adv_x + p_x + diff_x;
vn[idx] = v[idx] + adv_y + p_y + diff_y;
}
//
// calculation of source term: b
//
void calcSource(
double *b,
const double *u,
const double *v,
const dim3 &grid,
const dim3 &block
)
{
calcSourceKernel<<<grid, block>>>(b, u, v);
cudaDeviceSynchronize();
}
__global__ void calcSourceKernel(
double *b,
const double *u,
const double *v
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
b[idx] = _rho*(1/_dt*((u[idx+1]-u[idx-1])/(2.0*_dx) + (v[idx+_nx]-v[idx-_nx])/(2.0*_dy))
- sq((u[idx+1]-u[idx-1])/(2.0*_dx))
- 2.0*(u[idx+_nx]-u[idx-_nx])/(2.0*_dy)*(v[idx+1]-v[idx-1])/(2.0*_dx)
- sq((v[idx+_nx]-v[idx-_nx])/(2.0*_dy)));
}
//
// calculation of Poisson eq. for pressure
//
void calcPress(
double *pn,
const double *p,
const double *b,
const dim3 &grid,
const dim3 &block
)
{
calcPressKernel<<<grid, block>>>(pn, p, b);
cudaDeviceSynchronize();
}
__global__ void calcPressKernel(
double *pn,
const double *p,
const double *b
)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
if (isHalo(i, j)) return;
const int idx = i + _nx*j;
pn[idx] = ((p[idx+1]+p[idx-1])*sq(_dy) +
(p[idx+_nx]+p[idx-_nx])*sq(_dx)) /
(2.0*(sq(_dx)+sq(_dy))) -
sq(_dx)*sq(_dy)/(2.0*(sq(_dx)+sq(_dy))) *
b[idx];
}
} // namespace Solver
//
// boundary conditions
//
namespace BC {
void cavityFlow(
double *u,
double *v,
dim3 &grid,
dim3 &block
)
{
cavityFlowKernel<<<grid, block>>>(u, v);
cudaDeviceSynchronize();
}
__global__ void cavityFlowKernel(double *u, double *v)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int idx = i + _nx*j;
// left
if (i == 0) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// right
if (i == _nx-1) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// bottom
if (j == 0) {
u[idx] = 0.0;
v[idx] = 0.0;
}
// top
if (j == _ny-1) {
u[idx] = 1.0;
v[idx] = 0.0;
}
}
void neumann(
double *p,
dim3 &grid,
dim3 &block
)
{
neumannKernel<<<grid, block>>>(p);
cudaDeviceSynchronize();
}
__global__ void neumannKernel(double *p)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int idx = i + _nx*j;
// left
if (i == 0) {
p[idx] = p[1+_nx*j];
}
// right
if (i == _nx-1) {
p[idx] = p[_nx-2+_nx*j];
}
// bottom
if (j == 0) {
p[idx] = p[i+_nx*1];
}
// top
if (j == _ny-1) {
p[idx] = 0.0;
}
}
} // namespace BC
} // namespace NS
|
19a6ba6a60d4684ac9a9e97754f93a15cb74b41a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/ztrtri_upper.cu, normal z -> d, Thu Oct 8 23:05:35 2020
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "dtrtri.cuh"
#include "dtrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
dtrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
| 19a6ba6a60d4684ac9a9e97754f93a15cb74b41a.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/ztrtri_upper.cu, normal z -> d, Thu Oct 8 23:05:35 2020
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "dtrtri.cuh"
#include "dtrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
dtrtri_diag_upper_kernel(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
dtrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm16_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm32_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_dgemm_above64_part3_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
|
44efb23812076a6d60d506e093fe0c479bbc83fd.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // for time(), used to seed rand() in RandomInit
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT addition access
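// only threads with lane id 0-11 within each 32-thread warp execute the loop below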
if((i%32)<=11){
#pragma unroll 100
for(unsigned k=0; k<iterations;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
// seed once, then fill the array with raw values from rand()
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand();
}
} | 44efb23812076a6d60d506e093fe0c479bbc83fd.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // for time(), used to seed rand() in RandomInit
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT addition access
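// only threads with lane id 0-11 within each 32-thread warp execute the loop below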
if((i%32)<=11){
#pragma unroll 100
for(unsigned k=0; k<iterations;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
// seed once, then fill the array with raw values from rand()
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand();
}
} |
105f8f4b7a5b1d4dc1b67fa0667bc92825c17242.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_functions.h>
const int NUM_OF_DIMENSIONS = 3;
__constant__ double d_OMEGA= 0.64;
__constant__ double d_phi = 1.4;
__constant__ double PI = 3.1415;
__device__ double tempParticle1[NUM_OF_DIMENSIONS];
__device__ double tempParticle2[NUM_OF_DIMENSIONS];
// Rastrigin function: f(x) = A*n + sum_i (x_i^2 - A*cos(2*pi*x_i))
__device__ double fitness_function(double x[], int dimensionsCount)
{
int A = 10;
double result = 0.0;
for (int i = 0; i < dimensionsCount; i++)
{
result += x[i] * x[i] - A * cos(2 * PI * x[i]);
}
return A * dimensionsCount + result;
}
extern "C" {
__global__ void kernelUpdateParticle(double *positions, double *velocities,
double *pBests, double *gBest,
int particlesCount, int dimensionsCount,
double r1, double r2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount * dimensionsCount)
return;
velocities[i] = d_OMEGA * velocities[i] + r1 * (pBests[i] - positions[i])
+ r2 * (gBest[i % dimensionsCount] - positions[i]);
// Update particle position
positions[i] += velocities[i];
}
__global__ void kernelUpdatePBest(double *positions, double *pBests, double* gBest,
int particlesCount, int dimensionsCount)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount * dimensionsCount || i % dimensionsCount != 0)
return;
for (int j = 0; j < dimensionsCount; j++)
{
tempParticle1[j] = positions[i + j];
tempParticle2[j] = pBests[i + j];
}
if (fitness_function(tempParticle1, dimensionsCount) < fitness_function(tempParticle2, dimensionsCount))
{
for (int k = 0; k < dimensionsCount; k++)
pBests[i + k] = positions[i + k];
}
}
} | 105f8f4b7a5b1d4dc1b67fa0667bc92825c17242.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <math_functions.h>
const int NUM_OF_DIMENSIONS = 3;
__constant__ double d_OMEGA= 0.64;
__constant__ double d_phi = 1.4;
__constant__ double PI = 3.1415;
__device__ double tempParticle1[NUM_OF_DIMENSIONS];
__device__ double tempParticle2[NUM_OF_DIMENSIONS];
// Rastrigin function: f(x) = A*n + sum_i (x_i^2 - A*cos(2*pi*x_i))
__device__ double fitness_function(double x[], int dimensionsCount)
{
int A = 10;
double result = 0.0;
for (int i = 0; i < dimensionsCount; i++)
{
result += x[i] * x[i] - A * cos(2 * PI * x[i]);
}
return A * dimensionsCount + result;
}
extern "C" {
__global__ void kernelUpdateParticle(double *positions, double *velocities,
double *pBests, double *gBest,
int particlesCount, int dimensionsCount,
double r1, double r2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount * dimensionsCount)
return;
velocities[i] = d_OMEGA * velocities[i] + r1 * (pBests[i] - positions[i])
+ r2 * (gBest[i % dimensionsCount] - positions[i]);
// Update particle position
positions[i] += velocities[i];
}
__global__ void kernelUpdatePBest(double *positions, double *pBests, double* gBest,
int particlesCount, int dimensionsCount)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount * dimensionsCount || i % dimensionsCount != 0)
return;
for (int j = 0; j < dimensionsCount; j++)
{
tempParticle1[j] = positions[i + j];
tempParticle2[j] = pBests[i + j];
}
if (fitness_function(tempParticle1, dimensionsCount) < fitness_function(tempParticle2, dimensionsCount))
{
for (int k = 0; k < dimensionsCount; k++)
pBests[i + k] = positions[i + k];
}
}
} |
823f1df0e214061f5a990354a16b5dcbb2df0734.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace framework {
static __global__ void FillNAN(float* buf) {
buf[0] = 0.0;
buf[1] = 0.1;
buf[2] = NAN;
}
static __global__ void FillInf(float* buf) {
buf[0] = 0.0;
buf[1] = INFINITY;
buf[2] = 0.5;
}
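// float16 variants: 0x7fff is a NaN bit pattern, 0x7c00 is +infinity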
static __global__ void FillNAN(platform::float16* buf) {
buf[0] = 0.0;
buf[1] = 0.1;
buf[2].x = 0x7fff;
}
static __global__ void FillInf(platform::float16* buf) {
buf[0] = 0.0;
buf[1].x = 0x7c00;
buf[2] = 0.5;
}
TEST(TensorContainsNAN, GPU) {
using namespace paddle::platform;
CUDAPlace gpu(0);
auto& pool = DeviceContextPool::Instance();
auto* cuda_ctx = pool.GetByPlace(gpu);
{
Tensor tensor;
float* buf = tensor.mutable_data<float>({3}, gpu);
hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsNAN(tensor));
}
{
Tensor tensor;
float16* buf = tensor.mutable_data<float16>({3}, gpu);
hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsNAN(tensor));
}
}
TEST(TensorContainsInf, GPU) {
using namespace paddle::platform;
CUDAPlace gpu(0);
auto& pool = DeviceContextPool::Instance();
auto* cuda_ctx = pool.GetByPlace(gpu);
{
Tensor tensor;
float* buf = tensor.mutable_data<float>({3}, gpu);
hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsInf(tensor));
}
{
Tensor tensor;
float16* buf = tensor.mutable_data<float16>({3}, gpu);
hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsInf(tensor));
}
}
} // namespace framework
} // namespace paddle
| 823f1df0e214061f5a990354a16b5dcbb2df0734.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace framework {
static __global__ void FillNAN(float* buf) {
buf[0] = 0.0;
buf[1] = 0.1;
buf[2] = NAN;
}
static __global__ void FillInf(float* buf) {
buf[0] = 0.0;
buf[1] = INFINITY;
buf[2] = 0.5;
}
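// float16 variants: 0x7fff is a NaN bit pattern, 0x7c00 is +infinity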
static __global__ void FillNAN(platform::float16* buf) {
buf[0] = 0.0;
buf[1] = 0.1;
buf[2].x = 0x7fff;
}
static __global__ void FillInf(platform::float16* buf) {
buf[0] = 0.0;
buf[1].x = 0x7c00;
buf[2] = 0.5;
}
TEST(TensorContainsNAN, GPU) {
using namespace paddle::platform;
CUDAPlace gpu(0);
auto& pool = DeviceContextPool::Instance();
auto* cuda_ctx = pool.GetByPlace(gpu);
{
Tensor tensor;
float* buf = tensor.mutable_data<float>({3}, gpu);
FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsNAN(tensor));
}
{
Tensor tensor;
float16* buf = tensor.mutable_data<float16>({3}, gpu);
FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsNAN(tensor));
}
}
TEST(TensorContainsInf, GPU) {
using namespace paddle::platform;
CUDAPlace gpu(0);
auto& pool = DeviceContextPool::Instance();
auto* cuda_ctx = pool.GetByPlace(gpu);
{
Tensor tensor;
float* buf = tensor.mutable_data<float>({3}, gpu);
FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsInf(tensor));
}
{
Tensor tensor;
float16* buf = tensor.mutable_data<float16>({3}, gpu);
FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
cuda_ctx->Wait();
ASSERT_TRUE(TensorContainsInf(tensor));
}
}
} // namespace framework
} // namespace paddle
|
4998338875a7f7124543d5b8f5e89a099c6f05d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2023, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
#include "colmap/mvs/gpu_mat_ref_image.h"
#include "colmap/util/cudacc.h"
#include <iostream>
namespace colmap {
namespace mvs {
namespace {
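// Bilateral filtering pass: for every pixel, accumulate bilateral-weighted sums of the
// intensity and squared intensity over the search window and store the quantized image.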
__global__ void FilterKernel(const hipTextureObject_t image_texture,
GpuMat<uint8_t> image,
GpuMat<float> sum_image,
GpuMat<float> squared_sum_image,
const int window_radius,
const int window_step,
const float sigma_spatial,
const float sigma_color) {
const size_t row = blockDim.y * blockIdx.y + threadIdx.y;
const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= image.GetHeight() || col >= image.GetWidth()) {
return;
}
BilateralWeightComputer bilateral_weight_computer(sigma_spatial, sigma_color);
const float center_color = tex2D<float>(image_texture, col, row);
float color_sum = 0.0f;
float color_squared_sum = 0.0f;
float bilateral_weight_sum = 0.0f;
for (int window_row = -window_radius; window_row <= window_radius;
window_row += window_step) {
for (int window_col = -window_radius; window_col <= window_radius;
window_col += window_step) {
const float color =
tex2D<float>(image_texture, col + window_col, row + window_row);
const float bilateral_weight = bilateral_weight_computer.Compute(
window_row, window_col, center_color, color);
color_sum += bilateral_weight * color;
color_squared_sum += bilateral_weight * color * color;
bilateral_weight_sum += bilateral_weight;
}
}
color_sum /= bilateral_weight_sum;
color_squared_sum /= bilateral_weight_sum;
image.Set(row, col, static_cast<uint8_t>(255.0f * center_color));
sum_image.Set(row, col, color_sum);
squared_sum_image.Set(row, col, color_squared_sum);
}
} // namespace
GpuMatRefImage::GpuMatRefImage(const size_t width, const size_t height)
: height_(height), width_(width) {
image.reset(new GpuMat<uint8_t>(width, height));
sum_image.reset(new GpuMat<float>(width, height));
squared_sum_image.reset(new GpuMat<float>(width, height));
}
void GpuMatRefImage::Filter(const uint8_t* image_data,
const size_t window_radius,
const size_t window_step,
const float sigma_spatial,
const float sigma_color) {
hipTextureDesc texture_desc;
memset(&texture_desc, 0, sizeof(texture_desc));
texture_desc.addressMode[0] = hipAddressModeBorder;
texture_desc.addressMode[1] = hipAddressModeBorder;
texture_desc.addressMode[2] = hipAddressModeBorder;
texture_desc.filterMode = hipFilterModePoint;
texture_desc.readMode = hipReadModeNormalizedFloat;
texture_desc.normalizedCoords = false;
auto image_texture = CudaArrayLayeredTexture<uint8_t>::FromHostArray(
texture_desc, width_, height_, 1, image_data);
const dim3 block_size(kBlockDimX, kBlockDimY);
const dim3 grid_size((width_ - 1) / block_size.x + 1,
(height_ - 1) / block_size.y + 1);
hipLaunchKernelGGL(( FilterKernel), dim3(grid_size), dim3(block_size), 0, 0, image_texture->GetObj(),
*image,
*sum_image,
*squared_sum_image,
window_radius,
window_step,
sigma_spatial,
sigma_color);
CUDA_SYNC_AND_CHECK();
}
} // namespace mvs
} // namespace colmap
| 4998338875a7f7124543d5b8f5e89a099c6f05d0.cu | // Copyright (c) 2023, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
#include "colmap/mvs/gpu_mat_ref_image.h"
#include "colmap/util/cudacc.h"
#include <iostream>
namespace colmap {
namespace mvs {
namespace {
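// Bilateral filtering pass: for every pixel, accumulate bilateral-weighted sums of the
// intensity and squared intensity over the search window and store the quantized image.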
__global__ void FilterKernel(const cudaTextureObject_t image_texture,
GpuMat<uint8_t> image,
GpuMat<float> sum_image,
GpuMat<float> squared_sum_image,
const int window_radius,
const int window_step,
const float sigma_spatial,
const float sigma_color) {
const size_t row = blockDim.y * blockIdx.y + threadIdx.y;
const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= image.GetHeight() || col >= image.GetWidth()) {
return;
}
BilateralWeightComputer bilateral_weight_computer(sigma_spatial, sigma_color);
const float center_color = tex2D<float>(image_texture, col, row);
float color_sum = 0.0f;
float color_squared_sum = 0.0f;
float bilateral_weight_sum = 0.0f;
for (int window_row = -window_radius; window_row <= window_radius;
window_row += window_step) {
for (int window_col = -window_radius; window_col <= window_radius;
window_col += window_step) {
const float color =
tex2D<float>(image_texture, col + window_col, row + window_row);
const float bilateral_weight = bilateral_weight_computer.Compute(
window_row, window_col, center_color, color);
color_sum += bilateral_weight * color;
color_squared_sum += bilateral_weight * color * color;
bilateral_weight_sum += bilateral_weight;
}
}
color_sum /= bilateral_weight_sum;
color_squared_sum /= bilateral_weight_sum;
image.Set(row, col, static_cast<uint8_t>(255.0f * center_color));
sum_image.Set(row, col, color_sum);
squared_sum_image.Set(row, col, color_squared_sum);
}
} // namespace
GpuMatRefImage::GpuMatRefImage(const size_t width, const size_t height)
: height_(height), width_(width) {
image.reset(new GpuMat<uint8_t>(width, height));
sum_image.reset(new GpuMat<float>(width, height));
squared_sum_image.reset(new GpuMat<float>(width, height));
}
void GpuMatRefImage::Filter(const uint8_t* image_data,
const size_t window_radius,
const size_t window_step,
const float sigma_spatial,
const float sigma_color) {
cudaTextureDesc texture_desc;
memset(&texture_desc, 0, sizeof(texture_desc));
texture_desc.addressMode[0] = cudaAddressModeBorder;
texture_desc.addressMode[1] = cudaAddressModeBorder;
texture_desc.addressMode[2] = cudaAddressModeBorder;
texture_desc.filterMode = cudaFilterModePoint;
texture_desc.readMode = cudaReadModeNormalizedFloat;
texture_desc.normalizedCoords = false;
auto image_texture = CudaArrayLayeredTexture<uint8_t>::FromHostArray(
texture_desc, width_, height_, 1, image_data);
const dim3 block_size(kBlockDimX, kBlockDimY);
const dim3 grid_size((width_ - 1) / block_size.x + 1,
(height_ - 1) / block_size.y + 1);
FilterKernel<<<grid_size, block_size>>>(image_texture->GetObj(),
*image,
*sum_image,
*squared_sum_image,
window_radius,
window_step,
sigma_spatial,
sigma_color);
CUDA_SYNC_AND_CHECK();
}
} // namespace mvs
} // namespace colmap
|
1bed44a8ad770264eb4aec0f29480ccebfc658c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 28-Jun-2012 10:58:41
//
// user function
__device__
#include "adt_calc.h"
// CUDA kernel function
__global__ void op_cuda_adt_calc(
double *ind_arg0,
int *ind_map,
short *arg_map,
double *arg4,
double *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelem; n+=blockDim.x) {
// user-supplied kernel call
adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[2*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[3*set_size+n+offset_b]*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1 );
}
}
// host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// execute plan
int block_offset = 0;
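// the plan is executed colour by colour; block_offset marks where the current
// colour's blocks start within the plan arrays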
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d,
Plan->ind_map,
Plan->loc_map,
(double *)arg4.data_d,
(double *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_adt_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(1);
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
}
| 1bed44a8ad770264eb4aec0f29480ccebfc658c6.cu | //
// auto-generated by op2.m on 28-Jun-2012 10:58:41
//
// user function
__device__
#include "adt_calc.h"
// CUDA kernel function
__global__ void op_cuda_adt_calc(
double *ind_arg0,
int *ind_map,
short *arg_map,
double *arg4,
double *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelem; n+=blockDim.x) {
// user-supplied kernel call
adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[2*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[3*set_size+n+offset_b]*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1 );
}
}
// host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers_core(&cpu_t1, &wall_t1);
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// execute plan
int block_offset = 0;
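// the plan is executed colour by colour; block_offset marks where the current
// colour's blocks start within the plan arrays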
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
op_cuda_adt_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d,
Plan->ind_map,
Plan->loc_map,
(double *)arg4.data_d,
(double *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_adt_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(1);
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
}
|
77d6922ebded2ffe3181736f00492f647c2e2f7d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
// return GB/sec
float GBPerSec(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
// This is the CUDA "kernel" function that is run on the GPU. You
// know this because it is marked as a __global__ function.
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// this check is necessary to make the code work for values of N
// that are not a multiple of the thread block size (blockDim.x)
if (index < N)
result[index] = alpha * x[index] + y[index];
}
// saxpyCuda --
//
// This function is regular C code running on the CPU. It allocates
// memory on the GPU using CUDA API functions, uses CUDA API functions
// to transfer data from the CPU's memory address space to GPU memory
// address space, and launches the CUDA kernel function on the GPU.
void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
// must read both input arrays (xarray and yarray) and write to
// output array (resultarray)
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block. In this
// application we've hardcoded thread blocks to contain 512 CUDA
// threads.
const int threadsPerBlock = 512;
// Notice the round up here. The code needs to compute the number
// of threads blocks needed such that there is one thread per
// element of the arrays. This code is written to work for values
// of N that are not multiples of threadPerBlock.
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
// These are pointers that will be pointers to memory allocated
// *one the GPU*. You should allocate these pointers via
// hipMalloc. You can access the resulting buffers from CUDA
// device kernel code (see the kernel function saxpy_kernel()
// above) but you cannot access the contents these buffers from
// this thread. CPU threads cannot issue loads and stores from GPU
// memory!
float* device_x;
float* device_y;
float* device_result;
//
// STUDENTS TODO: allocate device memory buffers on the GPU using hipMalloc.
//
// We highly recommend taking a look at NVIDIA's
// tutorial, which clearly walks you through the few lines of code
// you need to write for this part of the assignment:
//
// https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/
//
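    // A minimal sketch of what the allocations could look like (kept as a
    // comment so the exercise above remains a TODO):
    //
    //   hipMalloc((void**)&device_x, N * sizeof(float));
    //   hipMalloc((void**)&device_y, N * sizeof(float));
    //   hipMalloc((void**)&device_result, N * sizeof(float));
    //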
// start timing after allocation of device memory
double startTime = CycleTimer::currentSeconds();
//
// STUDENTS TODO: copy input arrays to the GPU using hipMemcpy
//
    // run the kernel (launched via hipLaunchKernelGGL below).
    // Execution on the GPU occurs here.
hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result);
//
// STUDENTS TODO: copy result from GPU back to CPU using hipMemcpy
//
// end timing after result has been copied back into host memory
    double endTime = CycleTimer::currentSeconds();
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n",
errCode, hipGetErrorString(errCode));
}
double overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, GBPerSec(totalBytes, overallDuration));
//
// STUDENTS TODO: free memory buffers on the GPU using hipFree
//
}
void printCudaInfo() {
// print out stats about the GPU in the machine. Useful if
// students want to know what GPU they are running on.
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| 77d6922ebded2ffe3181736f00492f647c2e2f7d.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
// return GB/sec
float GBPerSec(int bytes, float sec) {
return static_cast<float>(bytes) / (1024. * 1024. * 1024.) / sec;
}
// This is the CUDA "kernel" function that is run on the GPU. You
// know this because it is marked as a __global__ function.
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall thread index from position of thread in current
// block, and given the block we are in (in this example only a 1D
// calculation is needed so the code only looks at the .x terms of
// blockDim and threadIdx.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// this check is necessary to make the code work for values of N
// that are not a multiple of the thread block size (blockDim.x)
if (index < N)
result[index] = alpha * x[index] + y[index];
}
// saxpyCuda --
//
// This function is regular C code running on the CPU. It allocates
// memory on the GPU using CUDA API functions, uses CUDA API functions
// to transfer data from the CPU's memory address space to GPU memory
// address space, and launches the CUDA kernel function on the GPU.
void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) {
// must read both input arrays (xarray and yarray) and write to
// output array (resultarray)
int totalBytes = sizeof(float) * 3 * N;
// compute number of blocks and threads per block. In this
// application we've hardcoded thread blocks to contain 512 CUDA
// threads.
const int threadsPerBlock = 512;
    // Notice the round up here. The code needs to compute the number
    // of thread blocks needed such that there is one thread per
    // element of the arrays. This code is written to work for values
    // of N that are not multiples of threadsPerBlock.
const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
// These are pointers that will be pointers to memory allocated
    // *on the GPU*. You should allocate these pointers via
// cudaMalloc. You can access the resulting buffers from CUDA
// device kernel code (see the kernel function saxpy_kernel()
    // above) but you cannot access the contents of these buffers from
// this thread. CPU threads cannot issue loads and stores from GPU
// memory!
float* device_x;
float* device_y;
float* device_result;
//
// STUDENTS TODO: allocate device memory buffers on the GPU using cudaMalloc.
//
// We highly recommend taking a look at NVIDIA's
// tutorial, which clearly walks you through the few lines of code
// you need to write for this part of the assignment:
//
// https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/
//
// start timing after allocation of device memory
double startTime = CycleTimer::currentSeconds();
//
// STUDENTS TODO: copy input arrays to the GPU using cudaMemcpy
//
// run CUDA kernel. (notice the <<< >>> brackets indicating a CUDA
// kernel launch) Execution on the GPU occurs here.
saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result);
//
// STUDENTS TODO: copy result from GPU back to CPU using cudaMemcpy
//
// end timing after result has been copied back into host memory
    double endTime = CycleTimer::currentSeconds();
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n",
errCode, cudaGetErrorString(errCode));
}
double overallDuration = endTime - startTime;
printf("Effective BW by CUDA saxpy: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, GBPerSec(totalBytes, overallDuration));
//
// STUDENTS TODO: free memory buffers on the GPU using cudaFree
//
}
void printCudaInfo() {
// print out stats about the GPU in the machine. Useful if
// students want to know what GPU they are running on.
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
b0a0d5dc00a4ea6f6345d649af7a47404bc3c770.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void build_binary_tree(int *x, int *child, int *root, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool newBody = true;
int rootValue = *root;
// build binary tree
int childPath;
int temp;
offset = 0;
while((bodyIndex + offset) < n){
if(newBody){
newBody = false;
temp = 0;
childPath = 0;
if(x[bodyIndex + offset] > rootValue){
childPath = 1;
}
}
int childIndex = child[temp*2 + childPath];
// traverse tree until we hit leaf node
while(childIndex >= 0){
temp = childIndex;
childPath = 0;
if(x[bodyIndex + offset] > temp){
childPath = 1;
}
childIndex = child[2*temp + childPath];
}
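        // child[] uses -1 for an empty slot and -2 as a temporary "locked"
        // marker: the atomicCAS below claims the slot so only one thread
        // inserts its value there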
if(childIndex != -2){
int locked = temp*2 + childPath;
if(atomicCAS(&child[locked], childIndex, -2) == childIndex){
if(childIndex == -1){
child[locked] = x[bodyIndex + offset];
}
offset += stride;
newBody = true;
}
}
__syncthreads(); // not strictly needed
}
}
int main(){
int n = 32;
int *h_x; //host array
int *d_x; //device array
int *h_root;
int *d_root;
int *h_child;
int *d_child;
// allocate memory
h_x = (int*)malloc(n*sizeof(int));
h_root = (int*)malloc(sizeof(int));
h_child = (int*)malloc(2*(n+1)*sizeof(int));
hipMalloc((void**)&d_root, sizeof(int));
hipMalloc((void**)&d_x, n*sizeof(int));
hipMalloc((void**)&d_child, 2*(n+1)*sizeof(int));
hipMemset(d_child, -1, 2*(n+1)*sizeof(int));
    // fill the h_x array
for(int i=0;i<n;i++){
h_x[i] = i+1;
}
// shuffling the array
for(int i=0;i<n;i++){
int j = random() % (n-i);
int temp = h_x[i];
h_x[i] = h_x[i+j];
h_x[i+j] = temp;
}
*h_root = h_x[0];
for(int i=0;i<n;i++){
printf("%d ", h_x[i]);
}
printf("\n");
// copy data to device
hipMemcpy(d_root, h_root, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_x, h_x, n*sizeof(int), hipMemcpyHostToDevice);
// kernel call
hipLaunchKernelGGL(( build_binary_tree), dim3(16), dim3(16), 0, 0, d_x, d_child, d_root, n);
// copy from device back to host
hipMemcpy(h_child, d_child, 2*(n+1)*sizeof(int), hipMemcpyDeviceToHost);
// print tree
for(int i=0;i<2*(n+1);i++){
printf("%d ", h_child[i]);
}
printf("\n");
// free memory
free(h_x);
free(h_root);
free(h_child);
hipFree(d_x);
hipFree(d_root);
hipFree(d_child);
}
| b0a0d5dc00a4ea6f6345d649af7a47404bc3c770.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void build_binary_tree(int *x, int *child, int *root, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool newBody = true;
int rootValue = *root;
// build binary tree
int childPath;
int temp;
offset = 0;
while((bodyIndex + offset) < n){
if(newBody){
newBody = false;
temp = 0;
childPath = 0;
if(x[bodyIndex + offset] > rootValue){
childPath = 1;
}
}
int childIndex = child[temp*2 + childPath];
// traverse tree until we hit leaf node
while(childIndex >= 0){
temp = childIndex;
childPath = 0;
if(x[bodyIndex + offset] > temp){
childPath = 1;
}
childIndex = child[2*temp + childPath];
}
if(childIndex != -2){
int locked = temp*2 + childPath;
if(atomicCAS(&child[locked], childIndex, -2) == childIndex){
if(childIndex == -1){
child[locked] = x[bodyIndex + offset];
}
offset += stride;
newBody = true;
}
}
__syncthreads(); // not strictly needed
}
}
int main(){
int n = 32;
int *h_x; //host array
int *d_x; //device array
int *h_root;
int *d_root;
int *h_child;
int *d_child;
// allocate memory
h_x = (int*)malloc(n*sizeof(int));
h_root = (int*)malloc(sizeof(int));
h_child = (int*)malloc(2*(n+1)*sizeof(int));
cudaMalloc((void**)&d_root, sizeof(int));
cudaMalloc((void**)&d_x, n*sizeof(int));
cudaMalloc((void**)&d_child, 2*(n+1)*sizeof(int));
cudaMemset(d_child, -1, 2*(n+1)*sizeof(int));
    // fill the h_x array
for(int i=0;i<n;i++){
h_x[i] = i+1;
}
// shuffling the array
for(int i=0;i<n;i++){
int j = random() % (n-i);
int temp = h_x[i];
h_x[i] = h_x[i+j];
h_x[i+j] = temp;
}
*h_root = h_x[0];
for(int i=0;i<n;i++){
printf("%d ", h_x[i]);
}
printf("\n");
// copy data to device
cudaMemcpy(d_root, h_root, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, h_x, n*sizeof(int), cudaMemcpyHostToDevice);
// kernel call
build_binary_tree<<< 16, 16>>>(d_x, d_child, d_root, n);
// copy from device back to host
cudaMemcpy(h_child, d_child, 2*(n+1)*sizeof(int), cudaMemcpyDeviceToHost);
// print tree
for(int i=0;i<2*(n+1);i++){
printf("%d ", h_child[i]);
}
printf("\n");
// free memory
free(h_x);
free(h_root);
free(h_child);
cudaFree(d_x);
cudaFree(d_root);
cudaFree(d_child);
}
|
ef5f39df1abe68d61359bb69cc097e2db2970dec.hip | // !!! This is a file automatically generated by hipify!!!
//-----------------------------------------------------------------------
// Reference
//
// Harris, M. and Garland, M., 2012.
// Optimizing parallel prefix operations for the Fermi architecture.
// In GPU Computing Gems Jade Edition (pp. 29-38). Morgan Kaufmann.
//-----------------------------------------------------------------------
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
__device__ __inline__ int warp_scan(int val, volatile int *s_data)
{
// initialize shared memory accessed by each warp with zeros
int idx = 2 * threadIdx.x - (threadIdx.x & (warpSize-1));
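  // idx addresses a private 2*warpSize slice of s_data for this warp; the
  // lower warpSize entries stay zero so the idx-1 .. idx-16 reads below never
  // pick up another warp's partial sums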
s_data[idx] = 0;
idx += warpSize;
int t = s_data[idx] = val;
s_data[idx] = t += s_data[idx - 1];
s_data[idx] = t += s_data[idx - 2];
s_data[idx] = t += s_data[idx - 4];
s_data[idx] = t += s_data[idx - 8];
s_data[idx] = t += s_data[idx -16];
return s_data[idx-1];
}
__device__ __inline__ unsigned int lanemask_lt()
{
#ifdef ASM
unsigned int mask;
asm("mov.u32 %0, %lanemask_lt;" : "=r"(mask));
return mask;
#else
const unsigned int lane = threadIdx.x & (warpSize-1);
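  // software equivalent of the %lanemask_lt register: bits set for all lanes
  // strictly below this thread's lane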
return (1 << (lane)) - 1;
#endif
}
// warp scan optimized for binary
__device__ __inline__ unsigned int binary_warp_scan(bool p)
{
const unsigned int mask = lanemask_lt();
#if (CUDART_VERSION < 9000)
unsigned int b = __ballot(p);
return __popc(b & mask);
#else
unsigned int b = __ballot_sync(mask, p);
return __popc(b);
#endif
}
// positive numbers
__host__ __device__ __inline__
bool valid(int x) {
return x > 0;
}
__device__ __inline__ int block_binary_prefix_sums(int x)
{
// 2 x warpIdx's upper bound (1024/32)
__shared__ int sdata[64];
bool predicate = valid(x);
// A. Compute exclusive prefix sums within each warp
int warpPrefix = binary_warp_scan(predicate);
int idx = threadIdx.x;
int warpIdx = idx / warpSize;
int laneIdx = idx & (warpSize - 1);
#ifdef DEBUG
printf("A %d %d %d\n", warpIdx, laneIdx, warpPrefix);
#endif
// B. The last thread of each warp stores inclusive
  // prefix sum to the warp's index in shared memory
if (laneIdx == warpSize - 1) {
sdata[warpIdx] = warpPrefix + predicate;
#ifdef DEBUG
printf("B %d %d\n", warpIdx, sdata[warpIdx]);
#endif
}
__syncthreads();
// C. One warp scans the warp partial sums
if (idx < warpSize) {
sdata[idx] = warp_scan(sdata[idx], sdata);
#ifdef DEBUG
printf("C: %d %d\n", idx, sdata[idx]);
#endif
}
__syncthreads();
// D. Each thread adds prefix sums of warp partial
// sums to its own intrawarp prefix sums
return warpPrefix + sdata[warpIdx];
}
__global__ void binary_scan(
int *__restrict__ g_odata,
const int *__restrict__ g_idata)
{
int i = threadIdx.x;
g_odata[i] = block_binary_prefix_sums(g_idata[i]);
}
template <int N>
void bscan (const int repeat)
{
int h_in[N];
int h_out[N];
int ref_out[N];
int *d_in, *d_out;
hipMalloc((void**)&d_in, N*sizeof(int));
hipMalloc((void**)&d_out, N*sizeof(int));
bool ok = true;
double time = 0.0;
srand(123);
size_t grid_size = 12*7*8*9*10;
dim3 grids (grid_size);
dim3 blocks (N);
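  // every block redundantly scans the same N elements; the large grid only
  // generates enough work for the throughput numbers reported below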
int valid_count = 0;
for (int i = 0; i < repeat; i++) {
for (int n = 0; n < N; n++) {
h_in[n] = rand() % N - N/2;
if (valid(h_in[n])) valid_count++; // total number of valid elements
}
hipMemcpy(d_in, h_in, N*sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( binary_scan), dim3(grids), dim3(blocks), 0, 0, d_out, d_in);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// verify exclusive sum
hipMemcpy(h_out, d_out, N*sizeof(int), hipMemcpyDeviceToHost);
ref_out[0] = 0;
ok &= (h_out[0] == ref_out[0]);
for (int i = 1; i < N; i++) {
ref_out[i] = ref_out[i-1] + (h_in[i-1] > 0);
ok &= (ref_out[i] == h_out[i]);
}
if (!ok) break;
} // for
printf("Block size = %d, ratio of valid elements = %f, verify = %s\n",
N, valid_count * 1.f / (N * repeat), ok ? "PASS" : "FAIL");
if (ok) {
printf("Average execution time: %f (us)\n", (time * 1e-3f) / repeat);
printf("Billion elements per second: %f\n\n",
grid_size * N * repeat / time);
}
hipFree(d_in);
hipFree(d_out);
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
// scan over N elements (N = [32, 1024])
bscan<32>(repeat);
bscan<64>(repeat);
bscan<128>(repeat);
bscan<256>(repeat);
bscan<512>(repeat);
bscan<1024>(repeat);
return 0;
}
| ef5f39df1abe68d61359bb69cc097e2db2970dec.cu | //-----------------------------------------------------------------------
// Reference
//
// Harris, M. and Garland, M., 2012.
// Optimizing parallel prefix operations for the Fermi architecture.
// In GPU Computing Gems Jade Edition (pp. 29-38). Morgan Kaufmann.
//-----------------------------------------------------------------------
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <chrono>
#include <cuda.h>
__device__ __inline__ int warp_scan(int val, volatile int *s_data)
{
// initialize shared memory accessed by each warp with zeros
int idx = 2 * threadIdx.x - (threadIdx.x & (warpSize-1));
s_data[idx] = 0;
idx += warpSize;
int t = s_data[idx] = val;
s_data[idx] = t += s_data[idx - 1];
s_data[idx] = t += s_data[idx - 2];
s_data[idx] = t += s_data[idx - 4];
s_data[idx] = t += s_data[idx - 8];
s_data[idx] = t += s_data[idx -16];
return s_data[idx-1];
}
__device__ __inline__ unsigned int lanemask_lt()
{
#ifdef ASM
unsigned int mask;
asm("mov.u32 %0, %lanemask_lt;" : "=r"(mask));
return mask;
#else
const unsigned int lane = threadIdx.x & (warpSize-1);
return (1 << (lane)) - 1;
#endif
}
// warp scan optimized for binary
__device__ __inline__ unsigned int binary_warp_scan(bool p)
{
const unsigned int mask = lanemask_lt();
#if (CUDART_VERSION < 9000)
unsigned int b = __ballot(p);
return __popc(b & mask);
#else
unsigned int b = __ballot_sync(mask, p);
return __popc(b);
#endif
}
// positive numbers
__host__ __device__ __inline__
bool valid(int x) {
return x > 0;
}
__device__ __inline__ int block_binary_prefix_sums(int x)
{
// 2 x warpIdx's upper bound (1024/32)
__shared__ int sdata[64];
bool predicate = valid(x);
// A. Compute exclusive prefix sums within each warp
int warpPrefix = binary_warp_scan(predicate);
int idx = threadIdx.x;
int warpIdx = idx / warpSize;
int laneIdx = idx & (warpSize - 1);
#ifdef DEBUG
printf("A %d %d %d\n", warpIdx, laneIdx, warpPrefix);
#endif
// B. The last thread of each warp stores inclusive
  // prefix sum to the warp's index in shared memory
if (laneIdx == warpSize - 1) {
sdata[warpIdx] = warpPrefix + predicate;
#ifdef DEBUG
printf("B %d %d\n", warpIdx, sdata[warpIdx]);
#endif
}
__syncthreads();
// C. One warp scans the warp partial sums
if (idx < warpSize) {
sdata[idx] = warp_scan(sdata[idx], sdata);
#ifdef DEBUG
printf("C: %d %d\n", idx, sdata[idx]);
#endif
}
__syncthreads();
// D. Each thread adds prefix sums of warp partial
  // sums to its own intra-warp prefix sums
return warpPrefix + sdata[warpIdx];
}
__global__ void binary_scan(
int *__restrict__ g_odata,
const int *__restrict__ g_idata)
{
int i = threadIdx.x;
g_odata[i] = block_binary_prefix_sums(g_idata[i]);
}
template <int N>
void bscan (const int repeat)
{
int h_in[N];
int h_out[N];
int ref_out[N];
int *d_in, *d_out;
cudaMalloc((void**)&d_in, N*sizeof(int));
cudaMalloc((void**)&d_out, N*sizeof(int));
bool ok = true;
double time = 0.0;
srand(123);
size_t grid_size = 12*7*8*9*10;
dim3 grids (grid_size);
dim3 blocks (N);
int valid_count = 0;
for (int i = 0; i < repeat; i++) {
for (int n = 0; n < N; n++) {
h_in[n] = rand() % N - N/2;
if (valid(h_in[n])) valid_count++; // total number of valid elements
}
cudaMemcpy(d_in, h_in, N*sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
binary_scan<<<grids, blocks>>>(d_out, d_in);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// verify exclusive sum
cudaMemcpy(h_out, d_out, N*sizeof(int), cudaMemcpyDeviceToHost);
ref_out[0] = 0;
ok &= (h_out[0] == ref_out[0]);
for (int i = 1; i < N; i++) {
ref_out[i] = ref_out[i-1] + (h_in[i-1] > 0);
ok &= (ref_out[i] == h_out[i]);
}
if (!ok) break;
} // for
printf("Block size = %d, ratio of valid elements = %f, verify = %s\n",
N, valid_count * 1.f / (N * repeat), ok ? "PASS" : "FAIL");
if (ok) {
printf("Average execution time: %f (us)\n", (time * 1e-3f) / repeat);
printf("Billion elements per second: %f\n\n",
grid_size * N * repeat / time);
}
cudaFree(d_in);
cudaFree(d_out);
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
// scan over N elements (N = [32, 1024])
bscan<32>(repeat);
bscan<64>(repeat);
bscan<128>(repeat);
bscan<256>(repeat);
bscan<512>(repeat);
bscan<1024>(repeat);
return 0;
}
|
fc3d743404d1811658710a65e4c8f430f5be8fd9.hip | // !!! This is a file automatically generated by hipify!!!
//C++
#include <time.h>
#include <iostream>
using namespace std;
//openCV
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
//CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//timer
#include "timer.hpp"
__constant__ int dev_H[9];
__global__ void convolution_kernel(unsigned char* in_image,short *out_image,int width,int height)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
    if(x<0 || x>=width || y<0 || y>=height)
{
return;
}
int pos = y*width+x;
if(x==0||y==0||(x==width-1)||(y==height-1))
{
out_image[pos] = in_image[pos];
return;
}
int left = pos - 1;
int right = pos + 1;
int up = pos - width;
int down = pos + width;
int up_left = up - 1;
int up_right = up + 1;
int down_left = down - 1;
int down_right = down + 1;
out_image[pos] = dev_H[0]*in_image[up_left] + dev_H[1]*in_image[up] + dev_H[2]*in_image[up_right]
+dev_H[3]*in_image[left] + dev_H[4]*in_image[pos] + dev_H[5]*in_image[right]
+dev_H[6]*in_image[down_left] + dev_H[7]*in_image[down] + dev_H[8]*in_image[down_right];
}
int main()
{
//in data
Mat in_image = imread("test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat out_image = Mat(in_image.size(),CV_16S);
//convolution kernel
int H[9];
H[0]=-1;H[1]=-1;H[2]=-1;
H[3]=-1;H[4]= 8;H[5]=-1;
H[6]=-1;H[7]=-1;H[8]=-1;
//calc
Timer start_time;
//init CUDA
//error status
hipError_t cuda_status;
//only chose one GPU
//init
cuda_status = hipSetDevice(0);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipSetDevice failed! Do you have a CUDA-Capable GPU installed?");
return -1;
}
//in image and out image
unsigned char * dev_in_image;
short * dev_out_image;
//size of image
int image_size = in_image.cols*in_image.rows;
//allocate memory on the GPU
cuda_status = hipMalloc((void**)&dev_in_image,sizeof(unsigned char)*image_size);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = hipMalloc((void**)&dev_out_image,sizeof(short)*image_size);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMalloc Failed");
exit( EXIT_FAILURE );
}
//copy
cuda_status = hipMemcpy(dev_in_image,in_image.data,sizeof(unsigned char)*image_size,hipMemcpyHostToDevice);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
hipMemset(dev_out_image,0,sizeof(short)*image_size);
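	// copy the 3x3 Laplacian kernel H into __constant__ memory (dev_H) so all
	// threads read the coefficients through the constant cache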
cuda_status = hipMemcpyToSymbol(dev_H,H,sizeof(int)*9);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
dim3 threads(16,16);
dim3 grid(max((in_image.cols+threads.x-1)/threads.x,1),max((in_image.rows+threads.y-1)/threads.y,1));
hipLaunchKernelGGL(( convolution_kernel), dim3(grid),dim3(threads), 0, 0, dev_in_image,dev_out_image,in_image.cols,in_image.rows);
//copy out
cuda_status = hipMemcpy((short*)out_image.data,dev_out_image,sizeof(short)*image_size,hipMemcpyDeviceToHost);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
hipFree(dev_in_image);
hipFree(dev_out_image);
cout<<start_time.elapsedMs()<<endl;
//output
Mat abs_dst;
convertScaleAbs( out_image, abs_dst );
imwrite("cuda_constant.jpg",abs_dst);
return 0;
} | fc3d743404d1811658710a65e4c8f430f5be8fd9.cu | //C++
#include <time.h>
#include <iostream>
using namespace std;
//openCV
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
//CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//timer
#include "timer.hpp"
__constant__ int dev_H[9];
__global__ void convolution_kernel(unsigned char* in_image,short *out_image,int width,int height)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
    if(x<0 || x>=width || y<0 || y>=height)
{
return;
}
int pos = y*width+x;
if(x==0||y==0||(x==width-1)||(y==height-1))
{
out_image[pos] = in_image[pos];
return;
}
int left = pos - 1;
int right = pos + 1;
int up = pos - width;
int down = pos + width;
int up_left = up - 1;
int up_right = up + 1;
int down_left = down - 1;
int down_right = down + 1;
out_image[pos] = dev_H[0]*in_image[up_left] + dev_H[1]*in_image[up] + dev_H[2]*in_image[up_right]
+dev_H[3]*in_image[left] + dev_H[4]*in_image[pos] + dev_H[5]*in_image[right]
+dev_H[6]*in_image[down_left] + dev_H[7]*in_image[down] + dev_H[8]*in_image[down_right];
}
int main()
{
//in data
Mat in_image = imread("test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat out_image = Mat(in_image.size(),CV_16S);
//convolution kernel
int H[9];
H[0]=-1;H[1]=-1;H[2]=-1;
H[3]=-1;H[4]= 8;H[5]=-1;
H[6]=-1;H[7]=-1;H[8]=-1;
//calc
Timer start_time;
//init CUDA
//error status
cudaError_t cuda_status;
//only chose one GPU
//init
cuda_status = cudaSetDevice(0);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaSetDevice failed! Do you have a CUDA-Capable GPU installed?");
return -1;
}
//in image and out image
unsigned char * dev_in_image;
short * dev_out_image;
//size of image
int image_size = in_image.cols*in_image.rows;
//allocate memory on the GPU
cuda_status = cudaMalloc((void**)&dev_in_image,sizeof(unsigned char)*image_size);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = cudaMalloc((void**)&dev_out_image,sizeof(short)*image_size);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMalloc Failed");
exit( EXIT_FAILURE );
}
//copy
cuda_status = cudaMemcpy(dev_in_image,in_image.data,sizeof(unsigned char)*image_size,cudaMemcpyHostToDevice);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
cudaMemset(dev_out_image,0,sizeof(short)*image_size);
cuda_status = cudaMemcpyToSymbol(dev_H,H,sizeof(int)*9);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
dim3 threads(16,16);
dim3 grid(max((in_image.cols+threads.x-1)/threads.x,1),max((in_image.rows+threads.y-1)/threads.y,1));
convolution_kernel<<<grid,threads>>>(dev_in_image,dev_out_image,in_image.cols,in_image.rows);
//copy out
cuda_status = cudaMemcpy((short*)out_image.data,dev_out_image,sizeof(short)*image_size,cudaMemcpyDeviceToHost);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
cudaFree(dev_in_image);
cudaFree(dev_out_image);
cout<<start_time.elapsedMs()<<endl;
//output
Mat abs_dst;
convertScaleAbs( out_image, abs_dst );
imwrite("cuda_constant.jpg",abs_dst);
return 0;
} |
604f41046b2bf726ae5f26ba1a42f99fd5e8e08a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
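  // reinterpret ILP consecutive elements as one aligned chunk so the compiler
  // can emit a single wide (vectorized) load/store instead of ILP scalar ones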
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template <typename FROM_T, typename TO_T>
__device__ void convert(const FROM_T vi, TO_T& vo)
{
vo = static_cast<TO_T>(vi);
}
template <>
__device__ void convert(const float vi, uint8_t& vo)
{
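  // Produces the high byte of the FP16 encoding of vi (sign, 5 exponent bits,
  // 2 mantissa bits). The 0xFF800000 mask keeps only vi's sign and exponent;
  // that value divided by 8 is half of the quantization step at this
  // magnitude, so adding it before truncating rounds to nearest instead of
  // toward zero.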
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = vi;
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, float& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = static_cast<float>(t.as_half);
}
template <>
__device__ void convert(const at::Half vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = static_cast<float>(vi);
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, at::Half& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = t.as_half;
}
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const MATH_T* per_tensor_beta1,
const MATH_T* per_tensor_beta2,
const MATH_T* per_tensor_beta3,
const int* per_tensor_bias_correction,
const int step,
const MATH_T* per_tensor_epsilon,
adamMode_t mode,
const MATH_T* per_tensor_decay,
const float grad_scale)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T beta1 = per_tensor_beta1[tensor_num];
MATH_T beta2 = per_tensor_beta2[tensor_num];
MATH_T beta3 = 1 - beta1;
MATH_T beta1_correction, beta2_correction;
if (per_tensor_bias_correction[tensor_num] == 1) {
beta1_correction = 1 - pow(beta1, step);
beta2_correction = 1 - pow(beta2, step);
} else {
beta1_correction = (MATH_T) 1.0;
beta2_correction = (MATH_T) 1.0;
}
MATH_T epsilon = per_tensor_epsilon[tensor_num];
MATH_T decay = per_tensor_decay[tensor_num];
GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
MATH_T* u = (MATH_T*)tl.addresses[4][tensor_loc];
u += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(g) &&
is_aligned(p) &&
is_aligned(m) &&
is_aligned(v))
{
GRAD_T l_g[ILP];
T l_p[ILP];
T l_m[ILP];
T l_v[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(l_g, g, 0, i_start);
if (decay != 0)
load_store(l_p, p, 0, i_start);
load_store(l_m, m, 0, i_start);
load_store(l_v, v, 0, i_start);
// unpack
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_g[ii] = l_g[ii];
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = l_p[ii];
}
r_m[ii] = l_m[ii];
r_v[ii] = l_v[ii];
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / grad_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / grad_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
l_m[ii] = r_m[ii];
l_v[ii] = r_v[ii];
}
// store
load_store(u, r_p, i_start, 0);
load_store(m, l_m, i_start, 0);
load_store(v, l_v, i_start, 0);
}
}
else
{
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
// special ?optimization? for lamb stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / grad_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / grad_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
u[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const MATH_T* per_tensor_param_norm,
const MATH_T* per_tensor_update_norm,
const MATH_T learning_rate,
const MATH_T* per_tensor_decay,
bool use_nvlamb)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T decay = per_tensor_decay[tensor_num];
MATH_T ratio = learning_rate;
// nvlamb: apply adaptive learning rate to all parameters
// otherwise, only apply to those with non-zero weight decay
if (use_nvlamb || (decay != (MATH_T) 0.0))
{
MATH_T param_norm = per_tensor_param_norm[tensor_num];
MATH_T update_norm = per_tensor_update_norm[tensor_num];
ratio = (update_norm != 0.0 && param_norm != 0.0) ? learning_rate * (param_norm / update_norm) : learning_rate;
}
MATH_T* update = (MATH_T*)tl.addresses[0][tensor_loc];
update += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
GRAD_T* p_copy = (GRAD_T*)tl.addresses[2][tensor_loc];
p_copy += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(p) &&
is_aligned(update))
{
T r_p[ILP];
MATH_T r_update[ILP];
GRAD_T r_p_copy[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_p, p, 0, i_start);
load_store(r_update, update, 0, i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * r_update[ii]);
convert(r_p[ii], r_p_copy[ii]);
}
load_store(p, r_p, i_start, 0);
load_store(p_copy, r_p_copy, i_start, 0);
}
}
else
{
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
convert(r_p[ii], p_copy[i]);
}
}
}
}
}
};
void multi_tensor_lamb_compute_update_term_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_beta1,
at::Tensor per_tensor_beta2,
at::Tensor per_tensor_beta3,
at::Tensor per_tensor_bias_correction,
const int step,
at::Tensor per_tensor_epsilon,
const int mode,
at::Tensor per_tensor_decay,
const float grad_scale)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 1, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_beta1.DATA_PTR<scalar_t_2>(),
per_tensor_beta2.DATA_PTR<scalar_t_2>(),
per_tensor_beta3.DATA_PTR<scalar_t_2>(),
per_tensor_bias_correction.DATA_PTR<int>(),
step,
per_tensor_epsilon.DATA_PTR<scalar_t_2>(),
(adamMode_t) mode,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
grad_scale); )))
AT_CUDA_CHECK(hipGetLastError());
}
void multi_tensor_lamb_update_weights_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
const float learning_rate,
at::Tensor per_tensor_decay,
bool use_nvlamb)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[2][0].scalar_type(), 1, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 2, "lamb_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage2Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_param_norm.DATA_PTR<scalar_t_2>(),
per_tensor_update_norm.DATA_PTR<scalar_t_2>(),
(scalar_t_2) learning_rate,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
use_nvlamb); )))
AT_CUDA_CHECK(hipGetLastError());
}
| 604f41046b2bf726ae5f26ba1a42f99fd5e8e08a.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template <typename FROM_T, typename TO_T>
__device__ void convert(const FROM_T vi, TO_T& vo)
{
vo = static_cast<TO_T>(vi);
}
template <>
__device__ void convert(const float vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = vi;
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, float& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = static_cast<float>(t.as_half);
}
template <>
__device__ void convert(const at::Half vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = static_cast<float>(vi);
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, at::Half& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = t.as_half;
}
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const MATH_T* per_tensor_beta1,
const MATH_T* per_tensor_beta2,
const MATH_T* per_tensor_beta3,
const int* per_tensor_bias_correction,
const int step,
const MATH_T* per_tensor_epsilon,
adamMode_t mode,
const MATH_T* per_tensor_decay,
const float grad_scale)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T beta1 = per_tensor_beta1[tensor_num];
MATH_T beta2 = per_tensor_beta2[tensor_num];
MATH_T beta3 = 1 - beta1;
MATH_T beta1_correction, beta2_correction;
if (per_tensor_bias_correction[tensor_num] == 1) {
beta1_correction = 1 - pow(beta1, step);
beta2_correction = 1 - pow(beta2, step);
} else {
beta1_correction = (MATH_T) 1.0;
beta2_correction = (MATH_T) 1.0;
}
MATH_T epsilon = per_tensor_epsilon[tensor_num];
MATH_T decay = per_tensor_decay[tensor_num];
GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
MATH_T* u = (MATH_T*)tl.addresses[4][tensor_loc];
u += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(g) &&
is_aligned(p) &&
is_aligned(m) &&
is_aligned(v))
{
GRAD_T l_g[ILP];
T l_p[ILP];
T l_m[ILP];
T l_v[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(l_g, g, 0, i_start);
if (decay != 0)
load_store(l_p, p, 0, i_start);
load_store(l_m, m, 0, i_start);
load_store(l_v, v, 0, i_start);
// unpack
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_g[ii] = l_g[ii];
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = l_p[ii];
}
r_m[ii] = l_m[ii];
r_v[ii] = l_v[ii];
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / grad_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / grad_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
l_m[ii] = r_m[ii];
l_v[ii] = r_v[ii];
}
// store
load_store(u, r_p, i_start, 0);
load_store(m, l_m, i_start, 0);
load_store(v, l_v, i_start, 0);
}
}
else
{
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
// special ?optimization? for lamb stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / grad_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / grad_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
u[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const MATH_T* per_tensor_param_norm,
const MATH_T* per_tensor_update_norm,
const MATH_T learning_rate,
const MATH_T* per_tensor_decay,
bool use_nvlamb)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T decay = per_tensor_decay[tensor_num];
MATH_T ratio = learning_rate;
// nvlamb: apply adaptive learning rate to all parameters
// otherwise, only apply to those with non-zero weight decay
if (use_nvlamb || (decay != (MATH_T) 0.0))
{
MATH_T param_norm = per_tensor_param_norm[tensor_num];
MATH_T update_norm = per_tensor_update_norm[tensor_num];
ratio = (update_norm != 0.0 && param_norm != 0.0) ? learning_rate * (param_norm / update_norm) : learning_rate;
}
MATH_T* update = (MATH_T*)tl.addresses[0][tensor_loc];
update += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
GRAD_T* p_copy = (GRAD_T*)tl.addresses[2][tensor_loc];
p_copy += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(p) &&
is_aligned(update))
{
T r_p[ILP];
MATH_T r_update[ILP];
GRAD_T r_p_copy[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_p, p, 0, i_start);
load_store(r_update, update, 0, i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * r_update[ii]);
convert(r_p[ii], r_p_copy[ii]);
}
load_store(p, r_p, i_start, 0);
load_store(p_copy, r_p_copy, i_start, 0);
}
}
else
{
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
convert(r_p[ii], p_copy[i]);
}
}
}
}
}
};
void multi_tensor_lamb_compute_update_term_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_beta1,
at::Tensor per_tensor_beta2,
at::Tensor per_tensor_beta3,
at::Tensor per_tensor_bias_correction,
const int step,
at::Tensor per_tensor_epsilon,
const int mode,
at::Tensor per_tensor_decay,
const float grad_scale)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 1, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_beta1.DATA_PTR<scalar_t_2>(),
per_tensor_beta2.DATA_PTR<scalar_t_2>(),
per_tensor_beta3.DATA_PTR<scalar_t_2>(),
per_tensor_bias_correction.DATA_PTR<int>(),
step,
per_tensor_epsilon.DATA_PTR<scalar_t_2>(),
(adamMode_t) mode,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
grad_scale); )))
AT_CUDA_CHECK(cudaGetLastError());
}
void multi_tensor_lamb_update_weights_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
const float learning_rate,
at::Tensor per_tensor_decay,
bool use_nvlamb)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[2][0].scalar_type(), 1, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 2, "lamb_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage2Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_param_norm.DATA_PTR<scalar_t_2>(),
per_tensor_update_norm.DATA_PTR<scalar_t_2>(),
(scalar_t_2) learning_rate,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
use_nvlamb); )))
AT_CUDA_CHECK(cudaGetLastError());
}
|
e0d4a801b70daf284a3f0368c95e33f9625560cd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hip/hip_runtime.h"
#include <cstdio>
#include "ocuutil/memory.h"
#include "ocuutil/thread.h"
#include "ocuutil/kernel_wrapper.h"
namespace ocu {
void *device_malloc(size_t bytes)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
void *d_ptr = 0;
if (hipSuccess != hipMalloc((void **)&d_ptr, bytes)) {
printf("[ERROR] hipMalloc - failed with hipError_t \"%s\"\n", hipGetErrorString(hipGetLastError()));
return 0;
}
kernel.PostKernel("hipMalloc");
return d_ptr;
}
void device_free(void *ptr)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
hipFree(ptr);
kernel.PostKernel("hipFree");
}
void *host_malloc(size_t bytes, bool pinned, bool write_combined)
{
if (!pinned && !write_combined) {
return malloc(bytes);
}
else {
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
void *result;
// always allocate portable pinned, not just pinned
unsigned int flag = hipHostMallocPortable;
if (write_combined)
flag |= hipHostMallocWriteCombined;
if (hipHostMalloc(&result, bytes, flag) != hipSuccess) {
printf("[ERROR] host_malloc - failed with hipError_t \"%s\"\n", hipGetErrorString(hipGetLastError()));
return 0;
}
kernel.PostKernel("hipHostMalloc");
return result;
}
}
void host_free(void *ptr, bool pinned)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
if (!pinned) {
free(ptr);
}
else {
if (hipHostFree(ptr) != hipSuccess) {
printf("[ERROR] host_free - failed on %p with hipError_t \"%s\"\n", ptr, hipGetErrorString(hipGetLastError()));
}
}
kernel.PostKernel("hipHostFree");
}
} // end namespace
| e0d4a801b70daf284a3f0368c95e33f9625560cd.cu | /*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda.h"
#include <cstdio>
#include "ocuutil/memory.h"
#include "ocuutil/thread.h"
#include "ocuutil/kernel_wrapper.h"
namespace ocu {
void *device_malloc(size_t bytes)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
void *d_ptr = 0;
if (cudaSuccess != cudaMalloc((void **)&d_ptr, bytes)) {
printf("[ERROR] cudaMalloc - failed with cudaError \"%s\"\n", cudaGetErrorString(cudaGetLastError()));
return 0;
}
kernel.PostKernel("cudaMalloc");
return d_ptr;
}
void device_free(void *ptr)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
cudaFree(ptr);
kernel.PostKernel("cudaFree");
}
void *host_malloc(size_t bytes, bool pinned, bool write_combined)
{
if (!pinned && !write_combined) {
return malloc(bytes);
}
else {
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
void *result;
// always allocate portable pinned, not just pinned
unsigned int flag = cudaHostAllocPortable;
if (write_combined)
flag |= cudaHostAllocWriteCombined;
if (cudaHostAlloc(&result, bytes, flag) != cudaSuccess) {
printf("[ERROR] host_malloc - failed with cudaError \"%s\"\n", cudaGetErrorString(cudaGetLastError()));
return 0;
}
kernel.PostKernel("cudaHostAlloc");
return result;
}
}
void host_free(void *ptr, bool pinned)
{
KernelWrapper kernel(KernelWrapper::KT_CPU);
kernel.PreKernel();
if (!pinned) {
free(ptr);
}
else {
if (cudaFreeHost(ptr) != cudaSuccess) {
printf("[ERROR] host_free - failed on %p with cudaError \"%s\"\n", ptr, cudaGetErrorString(cudaGetLastError()));
}
}
kernel.PostKernel("cudaFreeHost");
}
} // end namespace
|
4175042be3ad95a78a1a58cef7fde63b1c827e5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void conv(const float *A, const float *B, int aw, int ah, int bw, int bh, int b_sum, float *C){
/*Get row and column to operate on from thread coordinates*/
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
/*Calculate "padding" radius of convolution kernel (distance around central pixel)*/
int pw = (bw-1)/2;
int ph = (bh-1)/2;
/*If within the range of C (ie A - padding)*/
if( row < (ah-2*ph) && col < (aw-2*pw) ) {
/*Set initial pixel value*/
int val = 0;
/*For each vertical position on the kernel matrix, relative to the central pixel*/
for(int i=-ph; i<=ph; i=i+1){
/*Calculate zero-indexed row ID on kernel matrix*/
int b_row = i+ph;
/*For each horizontal position on the kernel matrix, relative to the central pixel*/
for(int j=-pw; j<=pw; j=j+1){
/*Calculate zero-indexed column ID on kernel matrix*/
int b_col = j+pw;
/*Add product of kernel value and corresponding image value to running total*/
val += A[ (row+ph +i)*aw + (col+pw +j) ] * B[ b_row*bw + b_col ];
}
}
/*Copy appropriately normalised resulting pixel value to position on C matrix*/
C[row*(aw-2*pw) + col] = val/b_sum;
}
} | 4175042be3ad95a78a1a58cef7fde63b1c827e5c.cu | __global__ void conv(const float *A, const float *B, int aw, int ah, int bw, int bh, int b_sum, float *C){
/*Get row and column to operate on from thread coordinates*/
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
/*Calculate "padding" radius of convolution kernel (distance around central pixel)*/
int pw = (bw-1)/2;
int ph = (bh-1)/2;
/*If within the range of C (ie A - padding)*/
if( row < (ah-2*ph) && col < (aw-2*pw) ) {
/*Set initial pixel value*/
int val = 0;
/*For each vertical position on the kernel matrix, relative to the central pixel*/
for(int i=-ph; i<=ph; i=i+1){
/*Calculate zero-indexed row ID on kernel matrix*/
int b_row = i+ph;
/*For each horizontal position on the kernel matrix, relative to the central pixel*/
for(int j=-pw; j<=pw; j=j+1){
/*Calculate zero-indexed column ID on kernel matrix*/
int b_col = j+pw;
/*Add product of kernel value and corresponding image value to running total*/
val += A[ (row+ph +i)*aw + (col+pw +j) ] * B[ b_row*bw + b_col ];
}
}
/*Copy appropriately normalised resulting pixel value to position on C matrix*/
C[row*(aw-2*pw) + col] = val/b_sum;
}
} |
09725646cf2015a83b5abd70ede9bb06e49f2e2e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <hip/hip_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != hipSuccess ) \
{ printf("Error: %s\n", hipGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
hipEventCreate(&start_);
hipEventCreate(&stop_);
hipEventRecord(start_, 0);
}
~GPUTimer()
{
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
void start()
{
hipEventRecord(start_, 0);
}
float seconds()
{
hipEventRecord(stop_, 0);
hipEventSynchronize(stop_);
float time;
hipEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
hipEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeC;
typedef float floatTypeCompute;
hipDataType typeA = HIP_R_32F;
hipDataType typeC = HIP_R_32F;
hipDataType typeCompute = HIP_R_32F;
floatTypeCompute alpha = (floatTypeCompute)1.1f;
floatTypeCompute gamma = (floatTypeCompute)1.2f;
/**********************
* Computing: C_{a,b,c} = alpha * A_{b,a,c} + gamma * C_{a,b,c}
**********************/
std::vector<int> modeC{'a','b','c'};
std::vector<int> modeA{'c','b','a'};
int nmodeA = modeA.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['a'] = 400;
extent['b'] = 200;
extent['c'] = 300;
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n",(sizeA + sizeC)/1024./1024./1024);
void *A_d, *C_d, *D_d;
HANDLE_CUDA_ERROR(hipMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(hipMalloc((void**) &C_d, sizeC));
HANDLE_CUDA_ERROR(hipMalloc((void**) &D_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || C == NULL)
{
printf("Error: Host allocation of A or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for(size_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX)*100;
for(size_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX)*100;
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(C_d, sizeC, C, sizeC, sizeC, 1, hipMemcpyDefault, 0));
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(D_d, sizeC, C, sizeC, sizeC, 1, hipMemcpyDefault, 0));
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(A_d, sizeA, A, sizeA, sizeA, 1, hipMemcpyDefault, 0));
/*************************
* Memcpy perf
*************************/
double minTimeMEMCPY = 1e100;
hipDeviceSynchronize();
GPUTimer timer;
timer.start();
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(D_d, sizeC, C_d, sizeC, sizeC, 1, hipMemcpyDefault, 0));
hipDeviceSynchronize();
minTimeMEMCPY = timer.seconds();
/*************************
* cuTENSOR
*************************/
cutensorStatus_t err;
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor( &handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor( &handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
double minTimeCUTENSOR = 1e100;
for (int i = 0; i < 3; i++)
{
HANDLE_CUDA_ERROR(hipMemcpy2DAsync(C_d, sizeC, C, sizeC, sizeC, 1, hipMemcpyDefault, 0));
HANDLE_CUDA_ERROR(hipDeviceSynchronize());
timer.start();
err = cutensorElementwiseBinary(&handle,
(void*)&alpha, A_d, &descA, modeA.data(),
(void*)&gamma, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
CUTENSOR_OP_ADD, typeCompute, 0 /* stream */);
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
{
printf("ERROR: %s\n", cutensorGetErrorString(err) );
}
minTimeCUTENSOR = (minTimeCUTENSOR < time)? minTimeCUTENSOR : time;
}
/*************************/
double transferedBytes = sizeC;
transferedBytes += ((float)alpha != 0.f) ? sizeA : 0;
transferedBytes += ((float)gamma != 0.f) ? sizeC : 0;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
printf("memcpy: %.2f GB/s\n", 2 * sizeC / minTimeMEMCPY / 1e9 );
if (A) free(A);
if (C) free(C);
if (A_d) hipFree(A_d);
if (C_d) hipFree(C_d);
if (D_d) hipFree(D_d);
return 0;
}
| 09725646cf2015a83b5abd70ede9bb06e49f2e2e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <cuda_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != cudaSuccess ) \
{ printf("Error: %s\n", cudaGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
cudaEventCreate(&start_);
cudaEventCreate(&stop_);
cudaEventRecord(start_, 0);
}
~GPUTimer()
{
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
void start()
{
cudaEventRecord(start_, 0);
}
float seconds()
{
cudaEventRecord(stop_, 0);
cudaEventSynchronize(stop_);
float time;
cudaEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
cudaEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeC;
typedef float floatTypeCompute;
cudaDataType_t typeA = CUDA_R_32F;
cudaDataType_t typeC = CUDA_R_32F;
cudaDataType_t typeCompute = CUDA_R_32F;
floatTypeCompute alpha = (floatTypeCompute)1.1f;
floatTypeCompute gamma = (floatTypeCompute)1.2f;
/**********************
* Computing: C_{a,b,c} = alpha * A_{b,a,c} + gamma * C_{a,b,c}
**********************/
std::vector<int> modeC{'a','b','c'};
std::vector<int> modeA{'c','b','a'};
int nmodeA = modeA.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['a'] = 400;
extent['b'] = 200;
extent['c'] = 300;
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n",(sizeA + sizeC)/1024./1024./1024);
void *A_d, *C_d, *D_d;
HANDLE_CUDA_ERROR(cudaMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(cudaMalloc((void**) &C_d, sizeC));
HANDLE_CUDA_ERROR(cudaMalloc((void**) &D_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || C == NULL)
{
printf("Error: Host allocation of A or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for(size_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX)*100;
for(size_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX)*100;
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(C_d, sizeC, C, sizeC, sizeC, 1, cudaMemcpyDefault, 0));
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(D_d, sizeC, C, sizeC, sizeC, 1, cudaMemcpyDefault, 0));
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(A_d, sizeA, A, sizeA, sizeA, 1, cudaMemcpyDefault, 0));
/*************************
* Memcpy perf
*************************/
double minTimeMEMCPY = 1e100;
cudaDeviceSynchronize();
GPUTimer timer;
timer.start();
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(D_d, sizeC, C_d, sizeC, sizeC, 1, cudaMemcpyDefault, 0));
cudaDeviceSynchronize();
minTimeMEMCPY = timer.seconds();
/*************************
* cuTENSOR
*************************/
cutensorStatus_t err;
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor( &handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor( &handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
double minTimeCUTENSOR = 1e100;
for (int i = 0; i < 3; i++)
{
HANDLE_CUDA_ERROR(cudaMemcpy2DAsync(C_d, sizeC, C, sizeC, sizeC, 1, cudaMemcpyDefault, 0));
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
timer.start();
err = cutensorElementwiseBinary(&handle,
(void*)&alpha, A_d, &descA, modeA.data(),
(void*)&gamma, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
CUTENSOR_OP_ADD, typeCompute, 0 /* stream */);
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
{
printf("ERROR: %s\n", cutensorGetErrorString(err) );
}
minTimeCUTENSOR = (minTimeCUTENSOR < time)? minTimeCUTENSOR : time;
}
/*************************/
double transferedBytes = sizeC;
transferedBytes += ((float)alpha != 0.f) ? sizeA : 0;
transferedBytes += ((float)gamma != 0.f) ? sizeC : 0;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
printf("memcpy: %.2f GB/s\n", 2 * sizeC / minTimeMEMCPY / 1e9 );
if (A) free(A);
if (C) free(C);
if (A_d) cudaFree(A_d);
if (C_d) cudaFree(C_d);
if (D_d) cudaFree(D_d);
return 0;
}
|
bbb21244f1ec1822454b8e22184484b3c7fabe7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* Matrix normalization.
* Compile with "nvcc matrixNormCuda.c -lm"
*/
/* Program Parameters */
#define N 8000 /* Matrix size */
int blocks_per_grid = 32;
int threads_per_block = 256;
/* Matrices */
float A[N*N], B[N*N];
/* CUDA arrays */
float *A_d, *B_d;
/* Normalize each column of A (zero mean, unit variance per column) into B */
__global__ void matrixNorm(float* A_dd, float* B_dd, int N_d) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// __shared__ float mu, sigma;
float mu, sigma;
int row;
if (idx < N_d) {
mu = 0.0;
for (row=0; row < N_d; row++){
mu += A_dd[row*N_d + idx];
}
mu /= N_d;
sigma = 0.0;
for (row=0; row < N_d; row++){
sigma += powf(A_dd[row*N_d + idx] - mu, 2.0);
}
sigma /= N_d;
sigma = sqrt(sigma);
for (row=0; row < N_d; row++) {
if (sigma == 0.0){
B_dd[row*N_d + idx] = 0.0;
}
else{
B_dd[row*N_d + idx] = (A_dd[row*N_d + idx] - mu) / sigma;
}
}
}
} | bbb21244f1ec1822454b8e22184484b3c7fabe7c.cu | #include "includes.h"
/* Matrix normalization.
* Compile with "nvcc matrixNormCuda.c -lm"
*/
/* Program Parameters */
#define N 8000 /* Matrix size */
int blocks_per_grid = 32;
int threads_per_block = 256;
/* Matrices */
float A[N*N], B[N*N];
/* CUDA arrays */
float *A_d, *B_d;
/* Normalize each column of A (zero mean, unit variance per column) into B */
__global__ void matrixNorm(float* A_dd, float* B_dd, int N_d) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// __shared__ float mu, sigma;
float mu, sigma;
int row;
if (idx < N_d) {
mu = 0.0;
for (row=0; row < N_d; row++){
mu += A_dd[row*N_d + idx];
}
mu /= N_d;
sigma = 0.0;
for (row=0; row < N_d; row++){
sigma += powf(A_dd[row*N_d + idx] - mu, 2.0);
}
sigma /= N_d;
sigma = sqrt(sigma);
for (row=0; row < N_d; row++) {
if (sigma == 0.0){
B_dd[row*N_d + idx] = 0.0;
}
else{
B_dd[row*N_d + idx] = (A_dd[row*N_d + idx] - mu) / sigma;
}
}
}
} |
91f6dfa115238299d29ed99b81b15b590303412f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scales_channel_mul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data_l = NULL;
hipMalloc(&data_l, XSIZE*YSIZE);
float *data_r = NULL;
hipMalloc(&data_r, XSIZE*YSIZE);
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
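// round the launch extents up to multiples of the block dimensions so the grid covers the whole matrix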
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((scales_channel_mul_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((scales_channel_mul_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((scales_channel_mul_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 91f6dfa115238299d29ed99b81b15b590303412f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scales_channel_mul_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data_l = NULL;
cudaMalloc(&data_l, XSIZE*YSIZE);
float *data_r = NULL;
cudaMalloc(&data_r, XSIZE*YSIZE);
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
scales_channel_mul_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
scales_channel_mul_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
scales_channel_mul_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f229ad8746741d936579077e046b69ad76c0d780.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Hartwig Anzt
@precisions normal z -> s d c
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
magma_zjaccardweights_kernel(
magma_int_t num_rows,
magma_int_t num_cols,
magma_int_t nnzJ,
magma_index_t *rowidxJ,
magma_index_t *colidxJ,
magmaDoubleComplex *valJ,
magma_index_t *rowptrA,
magma_index_t *colidxA,
magmaDoubleComplex *valA ) {
int i, j;
int k = blockDim.x * gridDim.x * blockIdx.y
+ blockDim.x * blockIdx.x + threadIdx.x;
magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex one = MAGMA_Z_MAKE(1.0, 0.0);
magmaDoubleComplex sum_i, sum_j, intersect;
int il, iu, jl, ju;
if (k < nnzJ)
{
i = rowidxJ[k];
j = colidxJ[k];
if( i != j ){
il = rowptrA[i];
iu = rowptrA[j];
sum_i = zero;
sum_j = zero;
intersect = zero;
sum_i = MAGMA_Z_MAKE((double)rowptrA[i+1] - rowptrA[i], 0.0);
sum_j = MAGMA_Z_MAKE((double)rowptrA[j+1] - rowptrA[j], 0.0);
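            // two-pointer merge over the sorted neighbor lists of rows i and j:
            // count the common entries, then Jaccard(i,j) = |inter| / (deg(i) + deg(j) - |inter|)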
while (il < rowptrA[i+1] && iu < rowptrA[j+1])
{
jl = colidxJ[il];
ju = rowidxJ[iu];
// avoid branching
// if there are actual values:
// intersect = ( jl == ju ) ? valJ[il] * valJ[iu] : sp;
// else
intersect = ( jl == ju ) ? intersect + one : intersect;
il = ( jl <= ju ) ? il+1 : il;
iu = ( ju <= jl ) ? iu+1 : iu;
}
valJ[k] = MAGMA_Z_MAKE(MAGMA_Z_REAL(intersect) / MAGMA_Z_REAL( sum_i + sum_j - intersect), 0.0 );
} else {
valJ[k] = MAGMA_Z_ONE;
}
}
}// end kernel
/**
Purpose
-------
Computes Jaccard weights for a matrix
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[out]
J magma_z_matrix*
Jaccard weights
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgepr
********************************************************************/
extern "C"
magma_int_t
magma_zjaccard_weights(
magma_z_matrix A,
magma_z_matrix *J,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t m = J->num_rows;
magma_int_t n = J->num_rows;
magma_int_t nnz = J->nnz;
int blocksize1 = 32;
int blocksize2 = 1;
int dimgrid1 = sqrt( magma_ceildiv( nnz, blocksize1 ) );
int dimgrid2 = magma_ceildiv(nnz, blocksize1*dimgrid1);
int dimgrid3 = 1;
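    // spread the nnz entries over a 2-D grid (blocksize1 * dimgrid1 * dimgrid2 >= nnz)
    // so that no single grid dimension has to cover all of them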
// printf("thread block: ( %d x %d ) x [%d x %d]\n", blocksize1, blocksize2, dimgrid1, dimgrid2);
// Runtime API
// hipFuncCachePreferShared: shared memory is 48 KB
// hipFuncCachePreferEqual: shared memory is 32 KB
// hipFuncCachePreferL1: shared memory is 16 KB
// hipFuncCachePreferNone: no preference
// (spaces are added to prevent expansion from the script from messing up)
// cudaFunc Set CacheConfig(hipFuncCache_t PreferShared);
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_zjaccardweights_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, nnz,
J->drowidx,
J->dcol,
J->dval,
A.drow,
A.dcol,
A.dval );
return info;
}
| f229ad8746741d936579077e046b69ad76c0d780.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Hartwig Anzt
@precisions normal z -> s d c
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
magma_zjaccardweights_kernel(
magma_int_t num_rows,
magma_int_t num_cols,
magma_int_t nnzJ,
magma_index_t *rowidxJ,
magma_index_t *colidxJ,
magmaDoubleComplex *valJ,
magma_index_t *rowptrA,
magma_index_t *colidxA,
magmaDoubleComplex *valA ) {
int i, j;
int k = blockDim.x * gridDim.x * blockIdx.y
+ blockDim.x * blockIdx.x + threadIdx.x;
magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex one = MAGMA_Z_MAKE(1.0, 0.0);
magmaDoubleComplex sum_i, sum_j, intersect;
int il, iu, jl, ju;
if (k < nnzJ)
{
i = rowidxJ[k];
j = colidxJ[k];
if( i != j ){
il = rowptrA[i];
iu = rowptrA[j];
sum_i = zero;
sum_j = zero;
intersect = zero;
sum_i = MAGMA_Z_MAKE((double)rowptrA[i+1] - rowptrA[i], 0.0);
sum_j = MAGMA_Z_MAKE((double)rowptrA[j+1] - rowptrA[j], 0.0);
while (il < rowptrA[i+1] && iu < rowptrA[j+1])
{
jl = colidxJ[il];
ju = rowidxJ[iu];
// avoid branching
// if there are actual values:
// intersect = ( jl == ju ) ? valJ[il] * valJ[iu] : sp;
// else
intersect = ( jl == ju ) ? intersect + one : intersect;
il = ( jl <= ju ) ? il+1 : il;
iu = ( ju <= jl ) ? iu+1 : iu;
}
valJ[k] = MAGMA_Z_MAKE(MAGMA_Z_REAL(intersect) / MAGMA_Z_REAL( sum_i + sum_j - intersect), 0.0 );
} else {
valJ[k] = MAGMA_Z_ONE;
}
}
}// end kernel
/**
Purpose
-------
Computes Jaccard weights for a matrix
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[out]
J magma_z_matrix*
Jaccard weights
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgepr
********************************************************************/
extern "C"
magma_int_t
magma_zjaccard_weights(
magma_z_matrix A,
magma_z_matrix *J,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_int_t m = J->num_rows;
magma_int_t n = J->num_rows;
magma_int_t nnz = J->nnz;
int blocksize1 = 32;
int blocksize2 = 1;
int dimgrid1 = sqrt( magma_ceildiv( nnz, blocksize1 ) );
int dimgrid2 = magma_ceildiv(nnz, blocksize1*dimgrid1);
int dimgrid3 = 1;
// printf("thread block: ( %d x %d ) x [%d x %d]\n", blocksize1, blocksize2, dimgrid1, dimgrid2);
// Runtime API
// cudaFuncCachePreferShared: shared memory is 48 KB
// cudaFuncCachePreferEqual: shared memory is 32 KB
// cudaFuncCachePreferL1: shared memory is 16 KB
// cudaFuncCachePreferNone: no preference
// (spaces are added to prevent expansion from the script from messing up)
// cudaFunc Set CacheConfig(cudaFuncCache PreferShared);
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_zjaccardweights_kernel<<< grid, block, 0, queue->cuda_stream() >>>(
m, n, nnz,
J->drowidx,
J->dcol,
J->dval,
A.drow,
A.dcol,
A.dval );
return info;
}
|
Kv2like_gpu.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h1 __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h2 __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha, hInf, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
_pp_var_h1[tid_] = hInf;
_pp_var_h2[tid_] = hInf;
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_2_, a_0_, ll4_, qt, mRat, mBeta, hInf, ba_0_, ba_1_, ll3_, ll6_, h1Rat, ll1_, ba_2_, ll2_, mAlpha, h2Rat, ll5_, ll0_, ll7_;
ll7_ = 0.;
ll6_ = 0.;
ll5_ = 0.;
ll4_ = 0.;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
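        // qt: Q10 temperature scaling factor (Q10 = 2.3, reference temperature 21 degC)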
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
mRat = 0.40000000000000002*qt*(mAlpha+mBeta);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
h1Rat = qt/( 360.0+( 1010.0+ 23.699999999999999*(v+ 54.0))*exp(pow( -((v+ 75.0)* 0.020833333333333332), 2.0)));
h2Rat = qt/( 2350.0+ 1380.0*exp( -0.010999999999999999*v)- 210.0*exp( -0.029999999999999999*v));
if (h2Rat< 0.) {
h2Rat = 0.001;
}
a_0_ = -mRat;
ba_0_ = 0.40000000000000002*qt*mAlpha/a_0_;
ll2_ = a_0_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll3_;
a_1_ = -1.0*h1Rat;
ba_1_ = hInf*h1Rat/a_1_;
ll4_ = a_1_*dt;
ll5_ = ( 1.0+ 0.5*ll4_)/( 1.0- 0.5*ll4_);
_pp_var_h1[tid_] = -ba_1_+(_pp_var_h1[tid_]+ba_1_)*ll5_;
a_2_ = -1.0*h2Rat;
ba_2_ = hInf*h2Rat/a_2_;
ll6_ = a_2_*dt;
ll7_ = ( 1.0+ 0.5*ll6_)/( 1.0- 0.5*ll6_);
_pp_var_h2[tid_] = -ba_2_+(_pp_var_h2[tid_]+ba_2_)*ll7_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_])*(v-ek);
current_ = ik;
conductivity_ = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_]);
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_Kv2like_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
    hipLaunchKernelGGL((multiply), dim3(grid_dim, 3), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
| Kv2like_gpu.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h1 __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h2 __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha, hInf, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
_pp_var_h1[tid_] = hInf;
_pp_var_h2[tid_] = hInf;
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_2_, a_0_, ll4_, qt, mRat, mBeta, hInf, ba_0_, ba_1_, ll3_, ll6_, h1Rat, ll1_, ba_2_, ll2_, mAlpha, h2Rat, ll5_, ll0_, ll7_;
ll7_ = 0.;
ll6_ = 0.;
ll5_ = 0.;
ll4_ = 0.;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
mRat = 0.40000000000000002*qt*(mAlpha+mBeta);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
h1Rat = qt/( 360.0+( 1010.0+ 23.699999999999999*(v+ 54.0))*exp(pow( -((v+ 75.0)* 0.020833333333333332), 2.0)));
h2Rat = qt/( 2350.0+ 1380.0*exp( -0.010999999999999999*v)- 210.0*exp( -0.029999999999999999*v));
if (h2Rat< 0.) {
h2Rat = 0.001;
}
a_0_ = -mRat;
ba_0_ = 0.40000000000000002*qt*mAlpha/a_0_;
ll2_ = a_0_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll3_;
a_1_ = -1.0*h1Rat;
ba_1_ = hInf*h1Rat/a_1_;
ll4_ = a_1_*dt;
ll5_ = ( 1.0+ 0.5*ll4_)/( 1.0- 0.5*ll4_);
_pp_var_h1[tid_] = -ba_1_+(_pp_var_h1[tid_]+ba_1_)*ll5_;
a_2_ = -1.0*h2Rat;
ba_2_ = hInf*h2Rat/a_2_;
ll6_ = a_2_*dt;
ll7_ = ( 1.0+ 0.5*ll6_)/( 1.0- 0.5*ll6_);
_pp_var_h2[tid_] = -ba_2_+(_pp_var_h2[tid_]+ba_2_)*ll7_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_])*(v-ek);
current_ = ik;
conductivity_ = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_]);
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_Kv2like_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 3}, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
bc6fef1a3717f048a7af1ea00ffc5030f5e22eac.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* FILENAME: test_image_alphablend.cu
*
* AUTHORS: Liang Jia START DATE: Saturday August 14th 2021
*
* LAST MODIFIED: Monday, August 16th 2021, 1:33:28 pm
*
* CONTACT: [email protected]
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <hip/hip_runtime.h>
#include <cudaop/cudaop.h>
#include <macro.h>
#include <utils.h>
#include <catch2/catch.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace smartmore::cudaop;
bool AlphaBlending(cv::Mat &bg_mat, const cv::Mat &fg_mat, const cv::Mat &mask_mat, cv::Point roi_ltpt) {
if (bg_mat.empty() || fg_mat.empty() || mask_mat.empty()) {
std::cout << "invalid images\n";
return false;
}
if (!roi_ltpt.inside(cv::Rect(0, 0, bg_mat.cols - fg_mat.cols, bg_mat.rows - fg_mat.rows))) {
std::cout << "blending roi must be inside the background image\n";
return false;
}
if (bg_mat.type() != fg_mat.type() || mask_mat.type() != CV_32FC1) {
std::cout << "background image and foreground image must be the same type\n";
std::cout << "mask image must be gray type\n";
return false;
}
cv::Mat mask;
cv::cvtColor(mask_mat, mask, cv::COLOR_GRAY2BGR);
cv::Mat work_mat = bg_mat(cv::Rect(roi_ltpt, fg_mat.size())).clone();
// Find number of pixels.
int numberOfPixels = fg_mat.rows * fg_mat.cols * fg_mat.channels();
// Get floating point pointers to the data matrices
float *fptr = reinterpret_cast<float *>(fg_mat.data);
float *bptr = reinterpret_cast<float *>(work_mat.data);
float *aptr = reinterpret_cast<float *>(mask.data);
for (int i = 0; i < numberOfPixels; i++, fptr++, aptr++, bptr++) {
*bptr = (*fptr) * (*aptr) + (*bptr) * (1 - *aptr);
}
work_mat.copyTo(bg_mat(cv::Rect(roi_ltpt, fg_mat.size())));
return true;
}
TEST_CASE("ImageAlphaBlend", "[image_alphablend]") {
// background
const int bg_h = 1080, bg_w = 1920, bg_c = 3;
// foreground
const int fg_h = 540, fg_w = 960, fg_c = 3;
// mask
const int mask_h = fg_h, mask_w = fg_w, mask_c = 1;
float *bg_device, *fg_device, *mask_device;
std::vector<float> bg_data(bg_h * bg_w * bg_c);
smartmore::RandomFloatVector(bg_data);
std::vector<float> fg_data(fg_h * fg_w * fg_c);
smartmore::RandomFloatVector(fg_data);
std::vector<float> mask_data(mask_h * mask_w * mask_c);
smartmore::RandomFloatVector(mask_data);
cv::Mat bg_f = cv::Mat(bg_h, bg_w, CV_32FC3, bg_data.data());
cv::Mat fg_f = cv::Mat(fg_h, fg_w, CV_32FC3, fg_data.data());
cv::Mat mask_f = cv::Mat(mask_h, mask_w, CV_32FC1, mask_data.data());
cv::Mat actual_mat = bg_f.clone();
cv::Mat expet_mat = bg_f.clone();
size_t bg_data_size = sizeof(float) * bg_f.cols * bg_f.rows * bg_f.channels();
size_t fg_data_size = sizeof(float) * fg_f.cols * fg_f.rows * fg_f.channels();
size_t mask_data_size = sizeof(float) * mask_f.cols * mask_f.rows * mask_f.channels();
CUDA_CHECK(hipMalloc(&bg_device, bg_data_size));
CUDA_CHECK(hipMemcpy(bg_device, bg_f.data, bg_data_size, hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&fg_device, fg_data_size));
CUDA_CHECK(hipMemcpy(fg_device, fg_f.data, fg_data_size, hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_device, mask_data_size));
CUDA_CHECK(hipMemcpy(mask_device, mask_f.data, mask_data_size, hipMemcpyHostToDevice));
{
smartmore::Clock clk("image alpha blend cost time: ");
ImageAlphaBlend<ImageType::kBGR_HWC, DataType::kFloat32>(fg_device, bg_device, mask_device, Size{bg_w, bg_h},
Rect{Point{100, 100}, Size{fg_w, fg_h}});
}
CUDA_CHECK(hipMemcpy(actual_mat.data, bg_device, bg_data_size, hipMemcpyDeviceToHost));
if (!AlphaBlending(expet_mat, fg_f, mask_f, cv::Point(100, 100))) {
REQUIRE(1 < 0.0001);
}
float max_diff = smartmore::CVMatMaxDiff(actual_mat, expet_mat);
REQUIRE(max_diff < 0.0001);
CUDA_CHECK_AND_FREE(bg_device);
CUDA_CHECK_AND_FREE(fg_device);
CUDA_CHECK_AND_FREE(mask_device);
} | bc6fef1a3717f048a7af1ea00ffc5030f5e22eac.cu | /*******************************************************************************
* FILENAME: test_image_alphablend.cu
*
* AUTHORS: Liang Jia START DATE: Saturday August 14th 2021
*
* LAST MODIFIED: Monday, August 16th 2021, 1:33:28 pm
*
* CONTACT: [email protected]
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <cuda_runtime.h>
#include <cudaop/cudaop.h>
#include <macro.h>
#include <utils.h>
#include <catch2/catch.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace smartmore::cudaop;
bool AlphaBlending(cv::Mat &bg_mat, const cv::Mat &fg_mat, const cv::Mat &mask_mat, cv::Point roi_ltpt) {
if (bg_mat.empty() || fg_mat.empty() || mask_mat.empty()) {
std::cout << "invalid images\n";
return false;
}
if (!roi_ltpt.inside(cv::Rect(0, 0, bg_mat.cols - fg_mat.cols, bg_mat.rows - fg_mat.rows))) {
std::cout << "blending roi must be inside the background image\n";
return false;
}
if (bg_mat.type() != fg_mat.type() || mask_mat.type() != CV_32FC1) {
std::cout << "background image and foreground image must be the same type\n";
std::cout << "mask image must be gray type\n";
return false;
}
cv::Mat mask;
cv::cvtColor(mask_mat, mask, cv::COLOR_GRAY2BGR);
cv::Mat work_mat = bg_mat(cv::Rect(roi_ltpt, fg_mat.size())).clone();
// Find number of pixels.
int numberOfPixels = fg_mat.rows * fg_mat.cols * fg_mat.channels();
// Get floating point pointers to the data matrices
float *fptr = reinterpret_cast<float *>(fg_mat.data);
float *bptr = reinterpret_cast<float *>(work_mat.data);
float *aptr = reinterpret_cast<float *>(mask.data);
for (int i = 0; i < numberOfPixels; i++, fptr++, aptr++, bptr++) {
*bptr = (*fptr) * (*aptr) + (*bptr) * (1 - *aptr);
}
work_mat.copyTo(bg_mat(cv::Rect(roi_ltpt, fg_mat.size())));
return true;
}
TEST_CASE("ImageAlphaBlend", "[image_alphablend]") {
// background
const int bg_h = 1080, bg_w = 1920, bg_c = 3;
// foreground
const int fg_h = 540, fg_w = 960, fg_c = 3;
// mask
const int mask_h = fg_h, mask_w = fg_w, mask_c = 1;
float *bg_device, *fg_device, *mask_device;
std::vector<float> bg_data(bg_h * bg_w * bg_c);
smartmore::RandomFloatVector(bg_data);
std::vector<float> fg_data(fg_h * fg_w * fg_c);
smartmore::RandomFloatVector(fg_data);
std::vector<float> mask_data(mask_h * mask_w * mask_c);
smartmore::RandomFloatVector(mask_data);
cv::Mat bg_f = cv::Mat(bg_h, bg_w, CV_32FC3, bg_data.data());
cv::Mat fg_f = cv::Mat(fg_h, fg_w, CV_32FC3, fg_data.data());
cv::Mat mask_f = cv::Mat(mask_h, mask_w, CV_32FC1, mask_data.data());
cv::Mat actual_mat = bg_f.clone();
cv::Mat expet_mat = bg_f.clone();
size_t bg_data_size = sizeof(float) * bg_f.cols * bg_f.rows * bg_f.channels();
size_t fg_data_size = sizeof(float) * fg_f.cols * fg_f.rows * fg_f.channels();
size_t mask_data_size = sizeof(float) * mask_f.cols * mask_f.rows * mask_f.channels();
CUDA_CHECK(cudaMalloc(&bg_device, bg_data_size));
CUDA_CHECK(cudaMemcpy(bg_device, bg_f.data, bg_data_size, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&fg_device, fg_data_size));
CUDA_CHECK(cudaMemcpy(fg_device, fg_f.data, fg_data_size, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_device, mask_data_size));
CUDA_CHECK(cudaMemcpy(mask_device, mask_f.data, mask_data_size, cudaMemcpyHostToDevice));
{
smartmore::Clock clk("image alpha blend cost time: ");
ImageAlphaBlend<ImageType::kBGR_HWC, DataType::kFloat32>(fg_device, bg_device, mask_device, Size{bg_w, bg_h},
Rect{Point{100, 100}, Size{fg_w, fg_h}});
}
CUDA_CHECK(cudaMemcpy(actual_mat.data, bg_device, bg_data_size, cudaMemcpyDeviceToHost));
if (!AlphaBlending(expet_mat, fg_f, mask_f, cv::Point(100, 100))) {
REQUIRE(1 < 0.0001);
}
float max_diff = smartmore::CVMatMaxDiff(actual_mat, expet_mat);
REQUIRE(max_diff < 0.0001);
CUDA_CHECK_AND_FREE(bg_device);
CUDA_CHECK_AND_FREE(fg_device);
CUDA_CHECK_AND_FREE(mask_device);
} |
558ffef4fa3dba43004306746842d002996e5089.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaFlow.h"
__global__ void SolveSmoothDualTVGlobalKernel (float* duhat, float* dvhat,
float* pu1, float* pu2,
float* pv1, float* pv2,
int width, int height, int stride,
float tau, float theta,
float *pu1s, float *pu2s,
float *pv1s, float* pv2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dux, duy, dvx, dvy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dux = 0;
dvx = 0;
}
else {
dux = duhat[right] - duhat[pos];
dvx = dvhat[right] - dvhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
duy = 0;
dvy = 0;
}
else {
duy = duhat[up] - duhat[pos];
dvy = dvhat[up] - dvhat[pos];
}
float magdu = sqrt(dux*dux + duy*duy);
float magdv = sqrt(dvx*dvx + dvy*dvy);
float fac = tau / theta;
float pu1sub = pu1[pos];
float pu2sub = pu2[pos];
float pv1sub = pv1[pos];
float pv2sub = pv2[pos];
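	// Chambolle-style dual update: p <- (p + (tau/theta) * grad(uhat)) / (1 + (tau/theta) * |grad(uhat)|),
	// i.e. a gradient ascent step on the dual variables with a pointwise normalization that keeps them bounded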
for (int k = 0; k < 1; k++) {
pu1sub = (pu1sub + fac*dux) / (1 + fac*magdu);
pu2sub = (pu2sub + fac*duy) / (1 + fac*magdu);
pv1sub = (pv1sub + fac*dvx) / (1 + fac*magdv);
pv2sub = (pv2sub + fac*dvy) / (1 + fac*magdv);
}
pu1s[pos] = pu1sub;
pu2s[pos] = pu2sub;
pv1s[pos] = pv1sub;
pv2s[pos] = pv2sub;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w image width
/// \param[in] h image height
/// \param[in] s image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
void sor::CudaFlow::SolveSmoothDualTVGlobal(float *duhat, float *dvhat,
float *pu1, float *pu2, float *pv1, float *pv2,
int w, int h, int s,
float tau, float theta,
float *pu1s, float*pu2s,
float *pv1s, float *pv2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( SolveSmoothDualTVGlobalKernel) , dim3(blocks), dim3(threads) , 0, 0, duhat, dvhat,
pu1, pu2, pv1, pv2,
w, h, s,
tau, theta,
pu1s, pu2s, pv1s, pv2s);
}
| 558ffef4fa3dba43004306746842d002996e5089.cu | #include "CudaFlow.h"
__global__ void SolveSmoothDualTVGlobalKernel (float* duhat, float* dvhat,
float* pu1, float* pu2,
float* pv1, float* pv2,
int width, int height, int stride,
float tau, float theta,
float *pu1s, float *pu2s,
float *pv1s, float* pv2s)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
//solve derivatives of duhat and dvhat
float dux, duy, dvx, dvy;
if ((ix + 1) >= width) {
//dux = duhat[pos] - duhat[left];
//dvx = dvhat[pos] - dvhat[left];
dux = 0;
dvx = 0;
}
else {
dux = duhat[right] - duhat[pos];
dvx = dvhat[right] - dvhat[pos];
}
if ((iy + 1) >= height) {
//duy = duhat[pos] - duhat[down];
//dvy = dvhat[pos] - dvhat[down];
duy = 0;
dvy = 0;
}
else {
duy = duhat[up] - duhat[pos];
dvy = dvhat[up] - dvhat[pos];
}
float magdu = sqrt(dux*dux + duy*duy);
float magdv = sqrt(dvx*dvx + dvy*dvy);
float fac = tau / theta;
float pu1sub = pu1[pos];
float pu2sub = pu2[pos];
float pv1sub = pv1[pos];
float pv2sub = pv2[pos];
for (int k = 0; k < 1; k++) {
pu1sub = (pu1sub + fac*dux) / (1 + fac*magdu);
pu2sub = (pu2sub + fac*duy) / (1 + fac*magdu);
pv1sub = (pv1sub + fac*dvx) / (1 + fac*magdv);
pv2sub = (pv2sub + fac*dvy) / (1 + fac*magdv);
}
pu1s[pos] = pu1sub;
pu2s[pos] = pu2sub;
pv1s[pos] = pv1sub;
pv2s[pos] = pv2sub;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w image width
/// \param[in] h image height
/// \param[in] s image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
void sor::CudaFlow::SolveSmoothDualTVGlobal(float *duhat, float *dvhat,
float *pu1, float *pu2, float *pv1, float *pv2,
int w, int h, int s,
float tau, float theta,
float *pu1s, float*pu2s,
float *pv1s, float *pv2s
)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveSmoothDualTVGlobalKernel <<< blocks, threads >>> (duhat, dvhat,
pu1, pu2, pv1, pv2,
w, h, s,
tau, theta,
pu1s, pu2s, pv1s, pv2s);
}
|
439e5c7ccf8298185f91c67248b52405a6e7ef00.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "NetworkMt.h"
#include "WeightFactory.h"
#include "ParamsMt.h"
namespace cytonMt
{
void NetworkMt::init()
{
weightFactory.init(params.optimization);
batch.init(cytonLib::batchSize, params.maxSeqLen);
decodings.preInit(params.maxSeqLen, params.mode);
Variable* tx=embeddingSrc.init("sourceEmbedding", &batch.srcMat, &batch.hSrcMat,
params.srcVocabSize, params.embedSize);
layers.push_back(&embeddingSrc);
tx=encoder.init( tx, params.maxSeqLen, batchSize, params.numLayers,
params.embedSize, params.hiddenSize.at(0));
layers.push_back(&encoder);
Weight* tw=params.srcTrgShareEmbed?&embeddingSrc.cell->w:NULL;
decodings.init( tx, encoder.hy, encoder.cy, &batch.trgMat, &batch.hTrgMat,
params.trgVocabSize, params.embedSize, params.hiddenSize.at(0), params.numLayers,
&trgVocab, tw);
layers.push_back(&decodings);
weightFactory.alloc(params.clipGradient);
}
void NetworkMt::backward()
{
decodings.backward();
encoder.backward();
embeddingSrc.backward();
}
Precision NetworkMt::train(Precision lambda, bool updateParams)
{
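	// one training step: forward + backward, accumulate gradients, optionally apply the
	// optimizer update scaled by lambda and the batch factor; returns the scaled likelihood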
assert(cytonLib::testMode==false);
this->forward();
hipDeviceSynchronize();
this->backward();
hipDeviceSynchronize();
Precision likehood=decodings.backwardScore;
weightFactory.clearGrad();
this->calculateGradient();
if(updateParams)
{
weightFactory.update(lambda*batch.factor);
}
nBatches+=1;
Precision likehood1=likehood*batch.factor;
return likehood1;
}
Precision NetworkMt::getScore()
{
this->forward();
hipDeviceSynchronize();
Precision likehood=decodings.getScore();
hipDeviceSynchronize();
return likehood;
}
void NetworkMt::saveModel(string modelName)
{
weightFactory.save(modelName);
}
void NetworkMt::loadModel(string modelName)
{
weightFactory.load(modelName);
}
void NetworkMt::prevApply()
{
}
string NetworkMt::apply(vector<string>& words, Precision& likeli)
{
assert(testMode);
embeddingSrc.forward();
encoder.forward();
return "";
}
void NetworkMt::getInitState(ModelState* state)
{
decodings.getInitState(state);
}
void NetworkMt::prevBeamSearch()
{
embeddingSrc.forward();
encoder.forward();
}
void NetworkMt::beamSearchTransfer(ModelState* start, int input,
ModelState* end, DevMatPrec& outputs)
{
decodings.transfer(start, input, end, outputs);
}
}
| 439e5c7ccf8298185f91c67248b52405a6e7ef00.cu | /*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "NetworkMt.h"
#include "WeightFactory.h"
#include "ParamsMt.h"
namespace cytonMt
{
void NetworkMt::init()
{
weightFactory.init(params.optimization);
batch.init(cytonLib::batchSize, params.maxSeqLen);
decodings.preInit(params.maxSeqLen, params.mode);
Variable* tx=embeddingSrc.init("sourceEmbedding", &batch.srcMat, &batch.hSrcMat,
params.srcVocabSize, params.embedSize);
layers.push_back(&embeddingSrc);
tx=encoder.init( tx, params.maxSeqLen, batchSize, params.numLayers,
params.embedSize, params.hiddenSize.at(0));
layers.push_back(&encoder);
Weight* tw=params.srcTrgShareEmbed?&embeddingSrc.cell->w:NULL;
decodings.init( tx, encoder.hy, encoder.cy, &batch.trgMat, &batch.hTrgMat,
params.trgVocabSize, params.embedSize, params.hiddenSize.at(0), params.numLayers,
&trgVocab, tw);
layers.push_back(&decodings);
weightFactory.alloc(params.clipGradient);
}
void NetworkMt::backward()
{
decodings.backward();
encoder.backward();
embeddingSrc.backward();
}
Precision NetworkMt::train(Precision lambda, bool updateParams)
{
assert(cytonLib::testMode==false);
this->forward();
cudaDeviceSynchronize();
this->backward();
cudaDeviceSynchronize();
Precision likehood=decodings.backwardScore;
weightFactory.clearGrad();
this->calculateGradient();
if(updateParams)
{
weightFactory.update(lambda*batch.factor);
}
nBatches+=1;
Precision likehood1=likehood*batch.factor;
return likehood1;
}
Precision NetworkMt::getScore()
{
this->forward();
cudaDeviceSynchronize();
Precision likehood=decodings.getScore();
cudaDeviceSynchronize();
return likehood;
}
void NetworkMt::saveModel(string modelName)
{
weightFactory.save(modelName);
}
void NetworkMt::loadModel(string modelName)
{
weightFactory.load(modelName);
}
void NetworkMt::prevApply()
{
}
string NetworkMt::apply(vector<string>& words, Precision& likeli)
{
assert(testMode);
embeddingSrc.forward();
encoder.forward();
return "";
}
void NetworkMt::getInitState(ModelState* state)
{
decodings.getInitState(state);
}
void NetworkMt::prevBeamSearch()
{
embeddingSrc.forward();
encoder.forward();
}
void NetworkMt::beamSearchTransfer(ModelState* start, int input,
ModelState* end, DevMatPrec& outputs)
{
decodings.transfer(start, input, end, outputs);
}
}
|
c666900bff6a8e0729f9894a6f813c7a9ac9506b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void d_minfilter_x(float *src, float *dst, int width, int height, int r, int BLOCKSIZE){
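// Row-wise running-min filter (van Herk / Gil-Werman-style two-pass scheme): after padding
// the row with MAX_VALUE, g accumulates minima scanned forward within each segment of
// mask = 2r+1 elements and h accumulates minima scanned backward, so the windowed minimum
// at column i reduces to min(g[i + 2r], h[i]).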
int i, j;
int mask, len, extra, num, head, rear;
int bid, tid;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float g[MAX_SIZE];
__shared__ float h[MAX_SIZE];
mask = 2 * r + 1;
len = width + 2 * r + mask - (width + 2 * r) % mask;
extra = len - width - r;
num = len / mask;
if (bid<height){
for (i = tid; i<r; i += BLOCKSIZE){
g[i] = MAX_VALUE;
h[i] = MAX_VALUE;
}
__syncthreads();
for (i = tid; i<width; i += BLOCKSIZE){
g[r + i] = src[bid * width + i];
h[r + i] = src[bid * width + i];
}
__syncthreads();
for (i = tid; i<extra; i += BLOCKSIZE){
g[r + width + i] = MAX_VALUE;
h[r + width + i] = MAX_VALUE;
}
__syncthreads();
for (i = tid; i<num; i += BLOCKSIZE){
head = i*mask;
rear = (i + 1)*mask - 1;
for (j = head + 1; j<(head + mask); j++){
g[j] = (g[j - 1] < g[j]) ? g[j - 1] : g[j];
h[rear - j + head] = (h[rear - j + head + 1] < h[rear - j + head]) ? h[rear - j + head + 1] : h[rear - j + head];
}
}
__syncthreads();
for (i = tid; i<width; i += BLOCKSIZE)
dst[bid * width + i] = (g[i + r + r] > h[i]) ? h[i] : g[i + r + r];
}
} | c666900bff6a8e0729f9894a6f813c7a9ac9506b.cu | __global__ void d_minfilter_x(float *src, float *dst, int width, int height, int r, int BLOCKSIZE){
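// Row-wise running-min filter (van Herk / Gil-Werman-style two-pass scheme): after padding
// the row with MAX_VALUE, g accumulates minima scanned forward within each segment of
// mask = 2r+1 elements and h accumulates minima scanned backward, so the windowed minimum
// at column i reduces to min(g[i + 2r], h[i]).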
int i, j;
int mask, len, extra, num, head, rear;
int bid, tid;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float g[MAX_SIZE];
__shared__ float h[MAX_SIZE];
mask = 2 * r + 1;
len = width + 2 * r + mask - (width + 2 * r) % mask;
extra = len - width - r;
num = len / mask;
if (bid<height){
for (i = tid; i<r; i += BLOCKSIZE){
g[i] = MAX_VALUE;
h[i] = MAX_VALUE;
}
__syncthreads();
for (i = tid; i<width; i += BLOCKSIZE){
g[r + i] = src[bid * width + i];
h[r + i] = src[bid * width + i];
}
__syncthreads();
for (i = tid; i<extra; i += BLOCKSIZE){
g[r + width + i] = MAX_VALUE;
h[r + width + i] = MAX_VALUE;
}
__syncthreads();
for (i = tid; i<num; i += BLOCKSIZE){
head = i*mask;
rear = (i + 1)*mask - 1;
for (j = head + 1; j<(head + mask); j++){
g[j] = (g[j - 1] < g[j]) ? g[j - 1] : g[j];
h[rear - j + head] = (h[rear - j + head + 1] < h[rear - j + head]) ? h[rear - j + head + 1] : h[rear - j + head];
}
}
__syncthreads();
for (i = tid; i<width; i += BLOCKSIZE)
dst[bid * width + i] = (g[i + r + r] > h[i]) ? h[i] : g[i + r + r];
}
} |
4c5672e6ac1a09b737183db00d705755019e4b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "process.h"
void cuda_process(Node* nodes_host) {
// Node* nodes;
// hipMemcpy(nodes, nodes_host, sizeof(nodes_host), hipMemcpyHostToDevice);
printf("Yo GPU time\n");
}
__global__ void kernel_process(void*) {
printf("things\n");
} | 4c5672e6ac1a09b737183db00d705755019e4b0d.cu | #include <stdio.h>
#include <cuda.h>
#include "process.h"
void cuda_process(Node* nodes_host) {
// Node* nodes;
// cudaMemcpy(nodes, nodes_host, sizeof(nodes_host), cudaMemcpyHostToDevice);
printf("Yo GPU time\n");
}
__global__ void kernel_process(void*) {
printf("things\n");
} |
e509aa9df704d95b5aa23dea3e0956793609a58e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaSupport.h"
#include "stdio.h"
// GPU Error check helper
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPU ASSERT: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
namespace CudaSupport {
const unsigned int MaxBlockSize = 512;
unsigned int blockSize;
unsigned int numBlocks;
// Device constants - initialized once for usage
__device__ __constant__ unsigned int numOfParticles;
__device__ __constant__ unsigned int hashBinsMaxSize;
__device__ __constant__ unsigned int hashBinsNum;
__device__ __constant__ unsigned int hashBinsNumHalf;
__device__ __constant__ double gridCellSize;
__device__ __constant__ unsigned int p1 = 73856093;
__device__ __constant__ unsigned int p2 = 19349663;
__device__ __constant__ unsigned int p3 = 83492791;
__device__ __constant__ double dt;
__device__ __constant__ double half_dt;
__device__ __constant__ double Kc;
__device__ __constant__ double3 gravity;
__device__ __constant__ double stiffness;
__device__ __constant__ double3 x_offset;
__device__ __constant__ double3 y_offset;
__device__ __constant__ double3 z_offset;
__device__ __constant__ double collisionThreshold;
__device__ __constant__ double lambda;
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of device helpers of CUDA kernels //////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ double3 operator+(const double3& a, const double3& b)
{
double3 out;
out.x = a.x + b.x;
out.y = a.y + b.y;
out.z = a.z + b.z;
return out;
}
__device__ __forceinline__ double3 operator-(const double3& a, const double3& b)
{
double3 out;
out.x = a.x - b.x;
out.y = a.y - b.y;
out.z = a.z - b.z;
return out;
}
__device__ __forceinline__ double3 operator*(const double3& a, const double b)
{
double3 out;
out.x = a.x * b;
out.y = a.y * b;
out.z = a.z * b;
return out;
}
__device__ __forceinline__ double3 operator*(const double b, const double3& a)
{
double3 out;
out.x = a.x * b;
out.y = a.y * b;
out.z = a.z * b;
return out;
}
__device__ __forceinline__ double norm(const double3& v) {
return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End device helpers CUDA kernels //////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of CUDA kernels ////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
__global__
void initializeKernel(
unsigned int* hashCounts)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < hashBinsNum; i += stride) {
hashCounts[i] = 0;
}
}
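// Spatial hash: maps a particle's integer grid cell (i, j, k) to a bin index in the upper
// half of the hash table by combining the cell coordinates with three large primes.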
__device__
unsigned int spatialHash(
const double3& pos)
{
long long int i = floor(pos.x / gridCellSize);
long long int j = floor(pos.y / gridCellSize);
long long int k = floor(pos.z / gridCellSize);
return (((i * p1) ^ (j * p2) ^ (k ^ p3)) % hashBinsNumHalf) + hashBinsNumHalf;
}
__global__
void collectKernel(
double3* positions,
unsigned int* hashTable,
unsigned int* hashCounts)
{
// Fill the hash table
unsigned int hash;
unsigned int idx;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
hash = spatialHash(positions[i]);
idx = hash * hashBinsMaxSize + atomicAdd(&hashCounts[hash], 1);
hashTable[idx] = i;
}
}
__global__
void detectCollisionsKernel(
double3* positions,
double3* forces,
unsigned int* hashTable,
unsigned int* hashCounts)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Collision detection
for (int i = index; i < numOfParticles; i += stride) {
// Get cells to check for each particle
unsigned int cellsToCheck[8];
unsigned int cellsToCheck_duplicates[8];
double3 position = positions[i];
// Hash all AABB vertices
cellsToCheck_duplicates[0] = spatialHash(position - x_offset - y_offset - z_offset);
cellsToCheck_duplicates[1] = spatialHash(position + x_offset - y_offset - z_offset);
cellsToCheck_duplicates[2] = spatialHash(position - x_offset + y_offset - z_offset);
cellsToCheck_duplicates[3] = spatialHash(position - x_offset - y_offset + z_offset);
cellsToCheck_duplicates[4] = spatialHash(position + x_offset + y_offset - z_offset);
cellsToCheck_duplicates[5] = spatialHash(position + x_offset - y_offset + z_offset);
cellsToCheck_duplicates[6] = spatialHash(position - x_offset + y_offset + z_offset);
cellsToCheck_duplicates[7] = spatialHash(position + x_offset + y_offset + z_offset);
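// Deduplicate the eight hashed cells so each candidate bin is scanned only once below.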
unsigned int numCellsToCheck = 0;
bool dupl;
for (int i = 0; i < 8; ++i) {
dupl = false;
for (int j = 0; j < numCellsToCheck; ++j) {
if (cellsToCheck_duplicates[i] == cellsToCheck[j]) {
dupl = true;
break;
}
}
if (!dupl) {
cellsToCheck[numCellsToCheck++] = cellsToCheck_duplicates[i];
}
}
// Check all the cells - if they are colliding, compute response
unsigned int nextCell, start;
for (int j = 0; j < numCellsToCheck; ++j) {
nextCell = cellsToCheck[j];
start = nextCell * hashBinsMaxSize;
for (int k = start; k < start + hashCounts[nextCell]; ++k) {
if (hashTable[k] != i) {
double3 diff = positions[i] - positions[hashTable[k]];
double distance = norm(diff);
if (distance < 1e-9) continue;
if (distance < collisionThreshold) {
//printf("Particles %d and %d are colliding!\n", i, hashTable[k]);
forces[i] =
forces[i] + (Kc * pow(distance - collisionThreshold, 2) / distance) * diff;
}
}
}
}
}
}
__global__
void advanceVelocitiesKernel(
double3* velocities,
double3* forces,
double massInv)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
velocities[i] = velocities[i] + half_dt * massInv * forces[i];
}
}
__global__
void advancePositionsKernel(
double3* positions,
double3* velocities)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
positions[i] = positions[i] + dt * velocities[i];
}
}
__global__
void addBodyForcesKernel(
double3* positions,
double3* velocities,
double3* forces,
double particleMass)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
if (positions[i].y < 0) {
// Restore position, otherwise particle might remain stuck under the floor (?)
//positions[i].y = 0;
// Reflect velocity
velocities[i].y *= -1;
}
forces[i] = particleMass * gravity;
}
}
__global__
void addSpringForcesKernel(
double3* positions,
double3* forces,
int* adjs,
unsigned int* adjsCounts,
unsigned int* adjsStarts,
double* restLengths,
double* taus)
{
int start, end;
double epsilon, distance;
double3 diff;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
start = adjsStarts[i];
end = start + adjsCounts[i];
for (int j = start; j < end; ++j) {
if (adjs[j] != -1) {
diff = positions[i] - positions[adjs[j]];
distance = norm(diff);
if (distance <= 1e-9) continue;
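// Relative strain of the spring; if it exceeds the particle's breaking threshold tau,
// the spring is removed by marking its adjacency entry as -1.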
epsilon = (distance / restLengths[j]) - 1;
if (epsilon > taus[i]) {
//printf("The spring between %d and %d broke!\n", i, adjs[j]);
adjs[j] = -1;
continue;
}
if (epsilon != 0) {
forces[i] = forces[i] + diff * (-1 * stiffness * epsilon / distance);
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of CUDA kernels //////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of simulation interface ////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
void initializeSimulationParameters(
unsigned int numOfParticles_host,
unsigned int hashBinsNum_host,
unsigned int hashBinsMaxSize_host,
double gridCellSize_host,
double dt_host,
double Kc_host,
double stiffness_host,
double avgtau_host,
double collisionThreshold_host,
double lambda_host,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
thrust::device_vector<double>& taus,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
hipMemcpyToSymbol(numOfParticles, &numOfParticles_host, sizeof(unsigned int));
hipMemcpyToSymbol(hashBinsMaxSize, &hashBinsMaxSize_host, sizeof(unsigned int));
hipMemcpyToSymbol(hashBinsNum, &hashBinsNum_host, sizeof(unsigned int));
unsigned int hashBinsNumHalf_host = hashBinsNum_host / 2;
hipMemcpyToSymbol(hashBinsNumHalf, &hashBinsNumHalf_host, sizeof(unsigned int));
hipMemcpyToSymbol(gridCellSize, &gridCellSize_host, sizeof(double));
hipMemcpyToSymbol(dt, &dt_host, sizeof(double));
double half_dt_host = dt_host / 2;
hipMemcpyToSymbol(half_dt, &half_dt_host, sizeof(double));
hipMemcpyToSymbol(Kc, &Kc_host, sizeof(double));
double3 gravity_host = make_double3(0, -9.81, 0);
hipMemcpyToSymbol(gravity, &gravity_host, sizeof(double3));
hipMemcpyToSymbol(stiffness, &stiffness_host, sizeof(double));
hipMemcpyToSymbol(collisionThreshold, &collisionThreshold_host, sizeof(double));
hipMemcpyToSymbol(lambda, &lambda_host, sizeof(double));
// Number of threads per block
blockSize = (numOfParticles_host > MaxBlockSize ? MaxBlockSize : numOfParticles_host);
// Number of blocks (to avoid overlapping)
numBlocks = (numOfParticles_host + blockSize - 1) / blockSize;
double3 x_offset_host = make_double3(lambda_host / 2, 0, 0);
double3 y_offset_host = make_double3(0, lambda_host / 2, 0);
double3 z_offset_host = make_double3(0, 0, lambda_host / 2);
hipMemcpyToSymbol(x_offset, &x_offset_host, sizeof(double3));
hipMemcpyToSymbol(y_offset, &y_offset_host, sizeof(double3));
hipMemcpyToSymbol(z_offset, &z_offset_host, sizeof(double3));
double3 zeroVector = make_double3(0, 0, 0);
velocities.resize(numOfParticles_host);
thrust::fill(thrust::device, velocities.begin(), velocities.end(), zeroVector);
forces.resize(numOfParticles_host);
thrust::fill(thrust::device, forces.begin(), forces.end(), zeroVector);
taus.resize(numOfParticles_host);
thrust::fill(thrust::device, taus.begin(), taus.end(), avgtau_host);
hashTable.resize(hashBinsMaxSize_host * hashBinsNum_host);
thrust::fill(thrust::device, hashTable.begin(), hashTable.end(), 0);
hashCounts.resize(hashBinsNum_host);
thrust::fill(thrust::device, hashCounts.begin(), hashCounts.end(), 0);
}
void iterate(
thrust::host_vector<double3>& positionsHost,
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
thrust::host_vector<int>& adjsHost,
thrust::device_vector<int>& adjs,
thrust::device_vector<unsigned int>& adjsCounts,
thrust::device_vector<unsigned int>& adjsStarts,
thrust::device_vector<double>& restLengths,
thrust::device_vector<double>& taus,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts,
unsigned int numberOfIterations,
double particleMass,
double particleMassInv)
{
// Get raw pointers to pass to kernels
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(hashTable.data());
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(hashCounts.data());
double3* positions_ptr = thrust::raw_pointer_cast(positions.data());
double3* forces_ptr = thrust::raw_pointer_cast(forces.data());
double3* velocities_ptr = thrust::raw_pointer_cast(velocities.data());
int* adjs_ptr = thrust::raw_pointer_cast(adjs.data());
unsigned int* adjsCounts_ptr = thrust::raw_pointer_cast(adjsCounts.data());
unsigned int* adjsStarts_ptr = thrust::raw_pointer_cast(adjsStarts.data());
double* restLengths_ptr = thrust::raw_pointer_cast(restLengths.data());
double* taus_ptr = thrust::raw_pointer_cast(taus.data());
for (int i = 0; i < numberOfIterations; ++i) {
// Initialize hash bins for the next iteration
hipLaunchKernelGGL(( initializeKernel), dim3(numBlocks), dim3(blockSize), 0, 0, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Collect - assign each particle to hash bin
hipLaunchKernelGGL(( collectKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Detect collisions and compute response
hipLaunchKernelGGL(( detectCollisionsKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, forces_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Advance velocities by half a time step (first Velocity-Verlet update)
hipLaunchKernelGGL(( advanceVelocitiesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, velocities_ptr, forces_ptr, particleMassInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Advance positions by one time step (second Velocity-Verlet update)
hipLaunchKernelGGL(( advancePositionsKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, velocities_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Add body forces: gravity + collision with the floor
hipLaunchKernelGGL(( addBodyForcesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, velocities_ptr, forces_ptr, particleMass);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Add spring forces
hipLaunchKernelGGL(( addSpringForcesKernel), dim3(numBlocks), dim3(blockSize), 0, 0,
positions_ptr, forces_ptr, adjs_ptr, adjsCounts_ptr, adjsStarts_ptr, restLengths_ptr, taus_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// Advance velocities (third Velocity-Verlet update)
hipLaunchKernelGGL(( advanceVelocitiesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, velocities_ptr, forces_ptr, particleMassInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
// Synchronize GPU and CPU before copying the data back
gpuErrchk(hipDeviceSynchronize());
thrust::copy(positions.begin(), positions.end(), positionsHost.begin());
thrust::copy(adjs.begin(), adjs.end(), adjsHost.begin());
}
void resetVelocitiesAndForces(
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces
)
{
double3 zeroVector = make_double3(0, 0, 0);
thrust::fill(thrust::device, velocities.begin(), velocities.end(), zeroVector);
thrust::fill(thrust::device, forces.begin(), forces.end(), zeroVector);
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of simulation interface //////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of kernel unit-testing helpers /////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
void* allocateDeviceMemory(unsigned int size)
{
void* ptr;
hipMalloc(&ptr, size);
return ptr;
}
void freeDeviceMemory(void* ptr) {
hipFree(ptr);
}
void copyToDevice(void* devPtr, void* dataPtr, unsigned int size) {
hipMemcpy(devPtr, dataPtr, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void copyFromDevice(void* dataPtr, void* devPtr, unsigned int size) {
hipMemcpy(dataPtr, devPtr, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
void initialize(
thrust::device_vector<unsigned int>& hashCounts)
{
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(&hashCounts[0]);
hipLaunchKernelGGL(( initializeKernel), dim3(numBlocks), dim3(blockSize), 0, 0, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void collect(
thrust::device_vector<double3>& positions,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(&hashTable[0]);
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(&hashCounts[0]);
hipLaunchKernelGGL(( collectKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void detectCollisions(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& forces,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
double3* positions_ptr = thrust::raw_pointer_cast(positions.data());
double3* forces_ptr = thrust::raw_pointer_cast(forces.data());
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(hashTable.data());
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(hashCounts.data());
hipLaunchKernelGGL(( detectCollisionsKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, forces_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void advanceVelocities(
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
double massInv)
{
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
hipLaunchKernelGGL(( advanceVelocitiesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, velocities_ptr, forces_ptr, massInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void advancePositions(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
hipLaunchKernelGGL(( advancePositionsKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, velocities_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void addBodyForces(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
double mass)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
hipLaunchKernelGGL(( addBodyForcesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, positions_ptr, velocities_ptr, forces_ptr, mass);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void addSpringForces(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& forces,
thrust::device_vector<int>& adjs,
thrust::device_vector<unsigned int>& adjsCounts,
thrust::device_vector<unsigned int>& adjsStarts,
thrust::device_vector<double>& restLengths,
thrust::device_vector<double>& taus)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
int* adjs_ptr = thrust::raw_pointer_cast(&adjs[0]);
unsigned int* adjsCounts_ptr = thrust::raw_pointer_cast(&adjsCounts[0]);
unsigned int* adjsStarts_ptr = thrust::raw_pointer_cast(&adjsStarts[0]);
double* restLengths_ptr = thrust::raw_pointer_cast(&restLengths[0]);
double* taus_ptr = thrust::raw_pointer_cast(&taus[0]);
hipLaunchKernelGGL(( addSpringForcesKernel), dim3(numBlocks), dim3(blockSize), 0, 0,
positions_ptr, forces_ptr, adjs_ptr, adjsCounts_ptr, adjsStarts_ptr, restLengths_ptr, taus_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of kernel unit-testing helpers ///////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
} | e509aa9df704d95b5aa23dea3e0956793609a58e.cu | #include "CudaSupport.h"
#include "stdio.h"
// GPU Error check helper
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPU ASSERT: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
namespace CudaSupport {
const unsigned int MaxBlockSize = 512;
unsigned int blockSize;
unsigned int numBlocks;
// Device constants - initialized once for usage
__device__ __constant__ unsigned int numOfParticles;
__device__ __constant__ unsigned int hashBinsMaxSize;
__device__ __constant__ unsigned int hashBinsNum;
__device__ __constant__ unsigned int hashBinsNumHalf;
__device__ __constant__ double gridCellSize;
__device__ __constant__ unsigned int p1 = 73856093;
__device__ __constant__ unsigned int p2 = 19349663;
__device__ __constant__ unsigned int p3 = 83492791;
__device__ __constant__ double dt;
__device__ __constant__ double half_dt;
__device__ __constant__ double Kc;
__device__ __constant__ double3 gravity;
__device__ __constant__ double stiffness;
__device__ __constant__ double3 x_offset;
__device__ __constant__ double3 y_offset;
__device__ __constant__ double3 z_offset;
__device__ __constant__ double collisionThreshold;
__device__ __constant__ double lambda;
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of device helpers of CUDA kernels //////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ double3 operator+(const double3& a, const double3& b)
{
double3 out;
out.x = a.x + b.x;
out.y = a.y + b.y;
out.z = a.z + b.z;
return out;
}
__device__ __forceinline__ double3 operator-(const double3& a, const double3& b)
{
double3 out;
out.x = a.x - b.x;
out.y = a.y - b.y;
out.z = a.z - b.z;
return out;
}
__device__ __forceinline__ double3 operator*(const double3& a, const double b)
{
double3 out;
out.x = a.x * b;
out.y = a.y * b;
out.z = a.z * b;
return out;
}
__device__ __forceinline__ double3 operator*(const double b, const double3& a)
{
double3 out;
out.x = a.x * b;
out.y = a.y * b;
out.z = a.z * b;
return out;
}
__device__ __forceinline__ double norm(const double3& v) {
return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End device helpers CUDA kernels //////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of CUDA kernels ////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
__global__
void initializeKernel(
unsigned int* hashCounts)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < hashBinsNum; i += stride) {
hashCounts[i] = 0;
}
}
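// Spatial hash: maps a particle's integer grid cell (i, j, k) to a bin index in the upper
// half of the hash table by combining the cell coordinates with three large primes.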
__device__
unsigned int spatialHash(
const double3& pos)
{
long long int i = floor(pos.x / gridCellSize);
long long int j = floor(pos.y / gridCellSize);
long long int k = floor(pos.z / gridCellSize);
return (((i * p1) ^ (j * p2) ^ (k ^ p3)) % hashBinsNumHalf) + hashBinsNumHalf;
}
__global__
void collectKernel(
double3* positions,
unsigned int* hashTable,
unsigned int* hashCounts)
{
// Fill the hash table
unsigned int hash;
unsigned int idx;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
hash = spatialHash(positions[i]);
idx = hash * hashBinsMaxSize + atomicAdd(&hashCounts[hash], 1);
hashTable[idx] = i;
}
}
__global__
void detectCollisionsKernel(
double3* positions,
double3* forces,
unsigned int* hashTable,
unsigned int* hashCounts)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Collision detection
for (int i = index; i < numOfParticles; i += stride) {
// Get cells to check for each particle
unsigned int cellsToCheck[8];
unsigned int cellsToCheck_duplicates[8];
double3 position = positions[i];
// Hash all AABB vertices
cellsToCheck_duplicates[0] = spatialHash(position - x_offset - y_offset - z_offset);
cellsToCheck_duplicates[1] = spatialHash(position + x_offset - y_offset - z_offset);
cellsToCheck_duplicates[2] = spatialHash(position - x_offset + y_offset - z_offset);
cellsToCheck_duplicates[3] = spatialHash(position - x_offset - y_offset + z_offset);
cellsToCheck_duplicates[4] = spatialHash(position + x_offset + y_offset - z_offset);
cellsToCheck_duplicates[5] = spatialHash(position + x_offset - y_offset + z_offset);
cellsToCheck_duplicates[6] = spatialHash(position - x_offset + y_offset + z_offset);
cellsToCheck_duplicates[7] = spatialHash(position + x_offset + y_offset + z_offset);
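// Deduplicate the eight hashed cells so each candidate bin is scanned only once below.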
unsigned int numCellsToCheck = 0;
bool dupl;
for (int i = 0; i < 8; ++i) {
dupl = false;
for (int j = 0; j < numCellsToCheck; ++j) {
if (cellsToCheck_duplicates[i] == cellsToCheck[j]) {
dupl = true;
break;
}
}
if (!dupl) {
cellsToCheck[numCellsToCheck++] = cellsToCheck_duplicates[i];
}
}
// Check all the cells - if they are colliding, compute response
unsigned int nextCell, start;
for (int j = 0; j < numCellsToCheck; ++j) {
nextCell = cellsToCheck[j];
start = nextCell * hashBinsMaxSize;
for (int k = start; k < start + hashCounts[nextCell]; ++k) {
if (hashTable[k] != i) {
double3 diff = positions[i] - positions[hashTable[k]];
double distance = norm(diff);
if (distance < 1e-9) continue;
if (distance < collisionThreshold) {
//printf("Particles %d and %d are colliding!\n", i, hashTable[k]);
forces[i] =
forces[i] + (Kc * pow(distance - collisionThreshold, 2) / distance) * diff;
}
}
}
}
}
}
__global__
void advanceVelocitiesKernel(
double3* velocities,
double3* forces,
double massInv)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
velocities[i] = velocities[i] + half_dt * massInv * forces[i];
}
}
__global__
void advancePositionsKernel(
double3* positions,
double3* velocities)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
positions[i] = positions[i] + dt * velocities[i];
}
}
__global__
void addBodyForcesKernel(
double3* positions,
double3* velocities,
double3* forces,
double particleMass)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
if (positions[i].y < 0) {
// Restore position, otherwise particle might remain stuck under the floor (?)
//positions[i].y = 0;
// Reflect velocity
velocities[i].y *= -1;
}
forces[i] = particleMass * gravity;
}
}
__global__
void addSpringForcesKernel(
double3* positions,
double3* forces,
int* adjs,
unsigned int* adjsCounts,
unsigned int* adjsStarts,
double* restLengths,
double* taus)
{
int start, end;
double epsilon, distance;
double3 diff;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numOfParticles; i += stride) {
start = adjsStarts[i];
end = start + adjsCounts[i];
for (int j = start; j < end; ++j) {
if (adjs[j] != -1) {
diff = positions[i] - positions[adjs[j]];
distance = norm(diff);
if (distance <= 1e-9) continue;
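// Relative strain of the spring; if it exceeds the particle's breaking threshold tau,
// the spring is removed by marking its adjacency entry as -1.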
epsilon = (distance / restLengths[j]) - 1;
if (epsilon > taus[i]) {
//printf("The spring between %d and %d broke!\n", i, adjs[j]);
adjs[j] = -1;
continue;
}
if (epsilon != 0) {
forces[i] = forces[i] + diff * (-1 * stiffness * epsilon / distance);
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of CUDA kernels //////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of simulation interface ////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
void initializeSimulationParameters(
unsigned int numOfParticles_host,
unsigned int hashBinsNum_host,
unsigned int hashBinsMaxSize_host,
double gridCellSize_host,
double dt_host,
double Kc_host,
double stiffness_host,
double avgtau_host,
double collisionThreshold_host,
double lambda_host,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
thrust::device_vector<double>& taus,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
cudaMemcpyToSymbol(numOfParticles, &numOfParticles_host, sizeof(unsigned int));
cudaMemcpyToSymbol(hashBinsMaxSize, &hashBinsMaxSize_host, sizeof(unsigned int));
cudaMemcpyToSymbol(hashBinsNum, &hashBinsNum_host, sizeof(unsigned int));
unsigned int hashBinsNumHalf_host = hashBinsNum_host / 2;
cudaMemcpyToSymbol(hashBinsNumHalf, &hashBinsNumHalf_host, sizeof(unsigned int));
cudaMemcpyToSymbol(gridCellSize, &gridCellSize_host, sizeof(double));
cudaMemcpyToSymbol(dt, &dt_host, sizeof(double));
double half_dt_host = dt_host / 2;
cudaMemcpyToSymbol(half_dt, &half_dt_host, sizeof(double));
cudaMemcpyToSymbol(Kc, &Kc_host, sizeof(double));
double3 gravity_host = make_double3(0, -9.81, 0);
cudaMemcpyToSymbol(gravity, &gravity_host, sizeof(double3));
cudaMemcpyToSymbol(stiffness, &stiffness_host, sizeof(double));
cudaMemcpyToSymbol(collisionThreshold, &collisionThreshold_host, sizeof(double));
cudaMemcpyToSymbol(lambda, &lambda_host, sizeof(double));
// Number of threads per block
blockSize = (numOfParticles_host > MaxBlockSize ? MaxBlockSize : numOfParticles_host);
// Number of blocks (to avoid overlapping)
numBlocks = (numOfParticles_host + blockSize - 1) / blockSize;
double3 x_offset_host = make_double3(lambda_host / 2, 0, 0);
double3 y_offset_host = make_double3(0, lambda_host / 2, 0);
double3 z_offset_host = make_double3(0, 0, lambda_host / 2);
cudaMemcpyToSymbol(x_offset, &x_offset_host, sizeof(double3));
cudaMemcpyToSymbol(y_offset, &y_offset_host, sizeof(double3));
cudaMemcpyToSymbol(z_offset, &z_offset_host, sizeof(double3));
double3 zeroVector = make_double3(0, 0, 0);
velocities.resize(numOfParticles_host);
thrust::fill(thrust::device, velocities.begin(), velocities.end(), zeroVector);
forces.resize(numOfParticles_host);
thrust::fill(thrust::device, forces.begin(), forces.end(), zeroVector);
taus.resize(numOfParticles_host);
thrust::fill(thrust::device, taus.begin(), taus.end(), avgtau_host);
hashTable.resize(hashBinsMaxSize_host * hashBinsNum_host);
thrust::fill(thrust::device, hashTable.begin(), hashTable.end(), 0);
hashCounts.resize(hashBinsNum_host);
thrust::fill(thrust::device, hashCounts.begin(), hashCounts.end(), 0);
}
void iterate(
thrust::host_vector<double3>& positionsHost,
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
thrust::host_vector<int>& adjsHost,
thrust::device_vector<int>& adjs,
thrust::device_vector<unsigned int>& adjsCounts,
thrust::device_vector<unsigned int>& adjsStarts,
thrust::device_vector<double>& restLengths,
thrust::device_vector<double>& taus,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts,
unsigned int numberOfIterations,
double particleMass,
double particleMassInv)
{
// Get raw pointers to pass to kernels
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(hashTable.data());
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(hashCounts.data());
double3* positions_ptr = thrust::raw_pointer_cast(positions.data());
double3* forces_ptr = thrust::raw_pointer_cast(forces.data());
double3* velocities_ptr = thrust::raw_pointer_cast(velocities.data());
int* adjs_ptr = thrust::raw_pointer_cast(adjs.data());
unsigned int* adjsCounts_ptr = thrust::raw_pointer_cast(adjsCounts.data());
unsigned int* adjsStarts_ptr = thrust::raw_pointer_cast(adjsStarts.data());
double* restLengths_ptr = thrust::raw_pointer_cast(restLengths.data());
double* taus_ptr = thrust::raw_pointer_cast(taus.data());
for (int i = 0; i < numberOfIterations; ++i) {
// Initialize hash bins for the next iteration
initializeKernel<<<numBlocks, blockSize>>>(hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Collect - assign each particle to hash bin
collectKernel<<<numBlocks, blockSize>>>(positions_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Detect collisions and compute response
detectCollisionsKernel<<<numBlocks, blockSize>>>(positions_ptr, forces_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Advance velocities by half a time step (first Velocity-Verlet update)
advanceVelocitiesKernel<<<numBlocks, blockSize>>>(velocities_ptr, forces_ptr, particleMassInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Advance positions by one time step (second Velocity-Verlet update)
advancePositionsKernel<<<numBlocks, blockSize>>>(positions_ptr, velocities_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Add body forces: gravity + collision with the floor
addBodyForcesKernel<<<numBlocks, blockSize>>>(positions_ptr, velocities_ptr, forces_ptr, particleMass);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Add spring forces
addSpringForcesKernel<<<numBlocks, blockSize>>>(
positions_ptr, forces_ptr, adjs_ptr, adjsCounts_ptr, adjsStarts_ptr, restLengths_ptr, taus_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// Advance velocities (third Velocity-Verlet update)
advanceVelocitiesKernel<<<numBlocks, blockSize>>>(velocities_ptr, forces_ptr, particleMassInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
// Synchronize GPU and CPU before copying the data back
gpuErrchk(cudaDeviceSynchronize());
thrust::copy(positions.begin(), positions.end(), positionsHost.begin());
thrust::copy(adjs.begin(), adjs.end(), adjsHost.begin());
}
void resetVelocitiesAndForces(
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces
)
{
double3 zeroVector = make_double3(0, 0, 0);
thrust::fill(thrust::device, velocities.begin(), velocities.end(), zeroVector);
thrust::fill(thrust::device, forces.begin(), forces.end(), zeroVector);
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of simulation interface //////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Start of kernel unit-testing helpers /////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
void* allocateDeviceMemory(unsigned int size)
{
void* ptr;
cudaMalloc(&ptr, size);
return ptr;
}
void freeDeviceMemory(void* ptr) {
cudaFree(ptr);
}
void copyToDevice(void* devPtr, void* dataPtr, unsigned int size) {
cudaMemcpy(devPtr, dataPtr, size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
}
void copyFromDevice(void* dataPtr, void* devPtr, unsigned int size) {
cudaMemcpy(dataPtr, devPtr, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
void initialize(
thrust::device_vector<unsigned int>& hashCounts)
{
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(&hashCounts[0]);
initializeKernel<<<numBlocks, blockSize>>>(hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void collect(
thrust::device_vector<double3>& positions,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(&hashTable[0]);
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(&hashCounts[0]);
collectKernel<<<numBlocks, blockSize>>>(positions_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void detectCollisions(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& forces,
thrust::device_vector<unsigned int>& hashTable,
thrust::device_vector<unsigned int>& hashCounts)
{
double3* positions_ptr = thrust::raw_pointer_cast(positions.data());
double3* forces_ptr = thrust::raw_pointer_cast(forces.data());
unsigned int* hashTable_ptr = thrust::raw_pointer_cast(hashTable.data());
unsigned int* hashCounts_ptr = thrust::raw_pointer_cast(hashCounts.data());
detectCollisionsKernel<<<numBlocks, blockSize>>>(positions_ptr, forces_ptr, hashTable_ptr, hashCounts_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void advanceVelocities(
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
double massInv)
{
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
advanceVelocitiesKernel<<<numBlocks, blockSize>>>(velocities_ptr, forces_ptr, massInv);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void advancePositions(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
advancePositionsKernel<<<numBlocks, blockSize>>>(positions_ptr, velocities_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void addBodyForces(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& velocities,
thrust::device_vector<double3>& forces,
double mass)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* velocities_ptr = thrust::raw_pointer_cast(&velocities[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
addBodyForcesKernel<<<numBlocks, blockSize>>>(positions_ptr, velocities_ptr, forces_ptr, mass);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void addSpringForces(
thrust::device_vector<double3>& positions,
thrust::device_vector<double3>& forces,
thrust::device_vector<int>& adjs,
thrust::device_vector<unsigned int>& adjsCounts,
thrust::device_vector<unsigned int>& adjsStarts,
thrust::device_vector<double>& restLengths,
thrust::device_vector<double>& taus)
{
double3* positions_ptr = thrust::raw_pointer_cast(&positions[0]);
double3* forces_ptr = thrust::raw_pointer_cast(&forces[0]);
int* adjs_ptr = thrust::raw_pointer_cast(&adjs[0]);
unsigned int* adjsCounts_ptr = thrust::raw_pointer_cast(&adjsCounts[0]);
unsigned int* adjsStarts_ptr = thrust::raw_pointer_cast(&adjsStarts[0]);
double* restLengths_ptr = thrust::raw_pointer_cast(&restLengths[0]);
double* taus_ptr = thrust::raw_pointer_cast(&taus[0]);
addSpringForcesKernel<<<numBlocks, blockSize>>>(
positions_ptr, forces_ptr, adjs_ptr, adjsCounts_ptr, adjsStarts_ptr, restLengths_ptr, taus_ptr);
#if ERRCHECK_AND_SYNC
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// End of kernel unit-testing helpers ///////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
} |
cd31d877bb21703e06415c365d6278a9fa2fbe09.hip | // !!! This is a file automatically generated by hipify!!!
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "config.h"
#include "mesh_constants_cuda.h"
// #include "epik_user.h"
/* ----------------------------------------------------------------------------------------------- */
// acoustic sources
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_add_sources_acoustic_kernel(realw* potential_dot_dot_acoustic,
int* ibool,
int* ispec_is_inner,
int phase_is_inner,
realw* sourcearrays,
double* stf_pre_compute,
int myrank,
int* islice_selected_source,
int* ispec_selected_source,
int* ispec_is_acoustic,
realw* kappastore,
int NSOURCES) {
int i = threadIdx.x;
int j = threadIdx.y;
int k = threadIdx.z;
int isource = blockIdx.x + gridDim.x*blockIdx.y; // bx
int ispec;
int iglob;
realw stf;
realw kappal;
if( isource < NSOURCES ){
if(myrank == islice_selected_source[isource]) {
ispec = ispec_selected_source[isource]-1;
if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_acoustic[ispec] ) {
stf = (realw) stf_pre_compute[isource];
iglob = ibool[INDEX4(5,5,5,i,j,k,ispec)]-1;
kappal = kappastore[INDEX4(5,5,5,i,j,k,ispec)];
atomicAdd(&potential_dot_dot_acoustic[iglob],
-sourcearrays[INDEX5(NSOURCES, 3, 5, 5,isource, 0, i,j,k)]*stf/kappal);
// potential_dot_dot_acoustic[iglob] +=
// -sourcearrays[INDEX5(NSOURCES, 3, 5, 5,isource, 0, i,j,k)]*stf/kappal;
}
}
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_add_sources_ac_cuda,
COMPUTE_ADD_SOURCES_AC_CUDA)(long* Mesh_pointer_f,
int* phase_is_innerf,
int* NSOURCESf,
double* h_stf_pre_compute,
int* myrankf) {
TRACE("compute_add_sources_ac_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); //get mesh pointer out of fortran integer container
// check if anything to do
if( mp->nsources_local == 0 ) return;
int phase_is_inner = *phase_is_innerf;
int NSOURCES = *NSOURCESf;
int myrank = *myrankf;
int num_blocks_x = NSOURCES;
int num_blocks_y = 1;
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
// copies pre-computed source time factors onto GPU
print_CUDA_error_if_any(hipMemcpy(mp->d_stf_pre_compute,h_stf_pre_compute,
NSOURCES*sizeof(double),hipMemcpyHostToDevice),18);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(5,5,5);
hipLaunchKernelGGL(( compute_add_sources_acoustic_kernel), dim3(grid),dim3(threads), 0, 0, mp->d_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_ispec_is_inner,
phase_is_inner,
mp->d_sourcearrays,
mp->d_stf_pre_compute,
myrank,
mp->d_islice_selected_source,
mp->d_ispec_selected_source,
mp->d_ispec_is_acoustic,
mp->d_kappastore,
NSOURCES);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("compute_add_sources_ac_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_add_sources_ac_s3_cuda,
COMPUTE_ADD_SOURCES_AC_s3_CUDA)(long* Mesh_pointer_f,
int* phase_is_innerf,
int* NSOURCESf,
double* h_stf_pre_compute,
int* myrankf) {
TRACE("compute_add_sources_ac_s3_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); //get mesh pointer out of fortran integer container
// check if anything to do
if( mp->nsources_local == 0 ) return;
int phase_is_inner = *phase_is_innerf;
int NSOURCES = *NSOURCESf;
int myrank = *myrankf;
int num_blocks_x = NSOURCES;
int num_blocks_y = 1;
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
// copies source time factors onto GPU
print_CUDA_error_if_any(hipMemcpy(mp->d_stf_pre_compute,h_stf_pre_compute,
NSOURCES*sizeof(double),hipMemcpyHostToDevice),18);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(5,5,5);
hipLaunchKernelGGL(( compute_add_sources_acoustic_kernel), dim3(grid),dim3(threads), 0, 0, mp->d_b_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_ispec_is_inner,
phase_is_inner,
mp->d_sourcearrays,
mp->d_stf_pre_compute,
myrank,
mp->d_islice_selected_source,
mp->d_ispec_selected_source,
mp->d_ispec_is_acoustic,
mp->d_kappastore,
NSOURCES);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("compute_add_sources_ac_s3_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic adjoint sources
/* ----------------------------------------------------------------------------------------------- */
__global__ void add_sources_ac_SIM_TYPE_2_OR_3_kernel(realw* potential_dot_dot_acoustic,
int nrec,
realw* adj_sourcearrays,
int* ibool,
int* ispec_is_inner,
int* ispec_is_acoustic,
int* ispec_selected_rec,
int phase_is_inner,
int* pre_computed_irec,
int nadj_rec_local,
realw* kappastore) {
int irec_local = blockIdx.x + gridDim.x*blockIdx.y;
// because of grid shape, irec_local can be too big
if(irec_local < nadj_rec_local) {
int irec = pre_computed_irec[irec_local];
int ispec = ispec_selected_rec[irec]-1;
if( ispec_is_acoustic[ispec] ){
// checks if element is in phase_is_inner run
if(ispec_is_inner[ispec] == phase_is_inner) {
int i = threadIdx.x;
int j = threadIdx.y;
int k = threadIdx.z;
int iglob = ibool[INDEX4(5,5,5,i,j,k,ispec)]-1;
//kappal = kappastore[INDEX4(5,5,5,i,j,k,ispec)];
//potential_dot_dot_acoustic[iglob] += adj_sourcearrays[INDEX6(nadj_rec_local,NTSTEP_BETWEEN_ADJSRC,3,5,5,
// pre_computed_irec_local_index[irec],
// pre_computed_index,
// 0,
// i,j,k)]/kappal;
// beware: for an acoustic medium, a pressure source would require taking the negative
// and dividing by kappa of the fluid;
// this would have to be done when constructing the adjoint source.
//
// note: we take the first component of the adj_sourcearrays
// the idea is to have e.g. a pressure source, where all 3 components would be the same
realw stf = adj_sourcearrays[INDEX5(5,5,5,3,i,j,k,0,irec_local)]; // / kappal
atomicAdd(&potential_dot_dot_acoustic[iglob],stf);
//+adj_sourcearrays[INDEX6(nadj_rec_local,NTSTEP_BETWEEN_ADJSRC,3,5,5,
// pre_computed_irec_local_index[irec],pre_computed_index-1,
// 0,i,j,k)] // / kappal
// );
}
}
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(add_sources_ac_sim_2_or_3_cuda,
ADD_SOURCES_AC_SIM_2_OR_3_CUDA)(long* Mesh_pointer,
realw* h_adj_sourcearrays,
int* phase_is_inner,
int* h_ispec_is_inner,
int* h_ispec_is_acoustic,
int* h_ispec_selected_rec,
int* myrank,
int* nrec,
int* time_index,
int* h_islice_selected_rec,
int* nadj_rec_local,
int* NTSTEP_BETWEEN_READ_ADJSRC) {
TRACE("add_sources_ac_sim_2_or_3_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
// checks
if( *nadj_rec_local != mp->nadj_rec_local) exit_on_cuda_error("add_sources_ac_sim_type_2_or_3: nadj_rec_local not equal\n");
// make sure grid dimension is less than 65535 in x dimension
int num_blocks_x = mp->nadj_rec_local;
int num_blocks_y = 1;
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
dim3 grid(num_blocks_x,num_blocks_y,1);
dim3 threads(5,5,5);
// build slice of adj_sourcearrays because full array is *very* large.
// note: this extracts array values for local adjoint sources at given time step "time_index"
// from large adj_sourcearrays array into h_adj_sourcearrays_slice
int ispec,i,j,k;
int irec_local = 0;
for(int irec = 0; irec < *nrec; irec++) {
if(*myrank == h_islice_selected_rec[irec]) {
irec_local++;
// takes only acoustic sources
ispec = h_ispec_selected_rec[irec]-1;
if( h_ispec_is_acoustic[ispec] ){
if( h_ispec_is_inner[ispec] == *phase_is_inner) {
for(k=0;k<5;k++) {
for(j=0;j<5;j++) {
for(i=0;i<5;i++) {
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,0,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
0,i,j,k)];
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,1,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
1,i,j,k)];
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,2,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
2,i,j,k)];
}
}
}
} // phase_is_inner
} // h_ispec_is_acoustic
}
}
// check all local sources were added
if( irec_local != mp->nadj_rec_local) exit_on_error("irec_local not equal to nadj_rec_local\n");
// copies extracted array values onto GPU
print_CUDA_error_if_any(hipMemcpy(mp->d_adj_sourcearrays, mp->h_adj_sourcearrays_slice,
(mp->nadj_rec_local)*3*NGLL3*sizeof(realw),hipMemcpyHostToDevice),99099);
// launches cuda kernel for acoustic adjoint sources
hipLaunchKernelGGL(( add_sources_ac_SIM_TYPE_2_OR_3_kernel), dim3(grid),dim3(threads), 0, 0, mp->d_potential_dot_dot_acoustic,
*nrec,
mp->d_adj_sourcearrays,
mp->d_ibool,
mp->d_ispec_is_inner,
mp->d_ispec_is_acoustic,
mp->d_ispec_selected_rec,
*phase_is_inner,
mp->d_pre_computed_irec,
mp->nadj_rec_local,
mp->d_kappastore);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("add_sources_acoustic_SIM_TYPE_2_OR_3_kernel");
#endif
}
| cd31d877bb21703e06415c365d6278a9fa2fbe09.cu | /*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <cuda.h>
#include <cublas.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "config.h"
#include "mesh_constants_cuda.h"
// #include "epik_user.h"
/* ----------------------------------------------------------------------------------------------- */
// acoustic sources
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_add_sources_acoustic_kernel(realw* potential_dot_dot_acoustic,
int* ibool,
int* ispec_is_inner,
int phase_is_inner,
realw* sourcearrays,
double* stf_pre_compute,
int myrank,
int* islice_selected_source,
int* ispec_selected_source,
int* ispec_is_acoustic,
realw* kappastore,
int NSOURCES) {
int i = threadIdx.x;
int j = threadIdx.y;
int k = threadIdx.z;
int isource = blockIdx.x + gridDim.x*blockIdx.y; // bx
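  // each thread block handles a single source; its 5x5x5 threads cover the GLL points (i,j,k) of that source's element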
int ispec;
int iglob;
realw stf;
realw kappal;
if( isource < NSOURCES ){
if(myrank == islice_selected_source[isource]) {
ispec = ispec_selected_source[isource]-1;
if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_acoustic[ispec] ) {
stf = (realw) stf_pre_compute[isource];
iglob = ibool[INDEX4(5,5,5,i,j,k,ispec)]-1;
kappal = kappastore[INDEX4(5,5,5,i,j,k,ispec)];
atomicAdd(&potential_dot_dot_acoustic[iglob],
-sourcearrays[INDEX5(NSOURCES, 3, 5, 5,isource, 0, i,j,k)]*stf/kappal);
// potential_dot_dot_acoustic[iglob] +=
// -sourcearrays[INDEX5(NSOURCES, 3, 5, 5,isource, 0, i,j,k)]*stf/kappal;
}
}
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_add_sources_ac_cuda,
COMPUTE_ADD_SOURCES_AC_CUDA)(long* Mesh_pointer_f,
int* phase_is_innerf,
int* NSOURCESf,
double* h_stf_pre_compute,
int* myrankf) {
TRACE("compute_add_sources_ac_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); //get mesh pointer out of fortran integer container
// check if anything to do
if( mp->nsources_local == 0 ) return;
int phase_is_inner = *phase_is_innerf;
int NSOURCES = *NSOURCESf;
int myrank = *myrankf;
int num_blocks_x = NSOURCES;
int num_blocks_y = 1;
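  // fold the 1-D block count over sources into two grid dimensions so gridDim.x never exceeds 65535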
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
// copies pre-computed source time factors onto GPU
print_CUDA_error_if_any(cudaMemcpy(mp->d_stf_pre_compute,h_stf_pre_compute,
NSOURCES*sizeof(double),cudaMemcpyHostToDevice),18);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(5,5,5);
compute_add_sources_acoustic_kernel<<<grid,threads>>>(mp->d_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_ispec_is_inner,
phase_is_inner,
mp->d_sourcearrays,
mp->d_stf_pre_compute,
myrank,
mp->d_islice_selected_source,
mp->d_ispec_selected_source,
mp->d_ispec_is_acoustic,
mp->d_kappastore,
NSOURCES);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("compute_add_sources_ac_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_add_sources_ac_s3_cuda,
COMPUTE_ADD_SOURCES_AC_s3_CUDA)(long* Mesh_pointer_f,
int* phase_is_innerf,
int* NSOURCESf,
double* h_stf_pre_compute,
int* myrankf) {
TRACE("compute_add_sources_ac_s3_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); //get mesh pointer out of fortran integer container
// check if anything to do
if( mp->nsources_local == 0 ) return;
int phase_is_inner = *phase_is_innerf;
int NSOURCES = *NSOURCESf;
int myrank = *myrankf;
int num_blocks_x = NSOURCES;
int num_blocks_y = 1;
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
// copies source time factors onto GPU
print_CUDA_error_if_any(cudaMemcpy(mp->d_stf_pre_compute,h_stf_pre_compute,
NSOURCES*sizeof(double),cudaMemcpyHostToDevice),18);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(5,5,5);
compute_add_sources_acoustic_kernel<<<grid,threads>>>(mp->d_b_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_ispec_is_inner,
phase_is_inner,
mp->d_sourcearrays,
mp->d_stf_pre_compute,
myrank,
mp->d_islice_selected_source,
mp->d_ispec_selected_source,
mp->d_ispec_is_acoustic,
mp->d_kappastore,
NSOURCES);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("compute_add_sources_ac_s3_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic adjoint sources
/* ----------------------------------------------------------------------------------------------- */
__global__ void add_sources_ac_SIM_TYPE_2_OR_3_kernel(realw* potential_dot_dot_acoustic,
int nrec,
realw* adj_sourcearrays,
int* ibool,
int* ispec_is_inner,
int* ispec_is_acoustic,
int* ispec_selected_rec,
int phase_is_inner,
int* pre_computed_irec,
int nadj_rec_local,
realw* kappastore) {
int irec_local = blockIdx.x + gridDim.x*blockIdx.y;
// because of grid shape, irec_local can be too big
if(irec_local < nadj_rec_local) {
int irec = pre_computed_irec[irec_local];
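    // pre_computed_irec maps this local adjoint-source index back to the global receiver number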
int ispec = ispec_selected_rec[irec]-1;
if( ispec_is_acoustic[ispec] ){
// checks if element is in phase_is_inner run
if(ispec_is_inner[ispec] == phase_is_inner) {
int i = threadIdx.x;
int j = threadIdx.y;
int k = threadIdx.z;
int iglob = ibool[INDEX4(5,5,5,i,j,k,ispec)]-1;
//kappal = kappastore[INDEX4(5,5,5,i,j,k,ispec)];
//potential_dot_dot_acoustic[iglob] += adj_sourcearrays[INDEX6(nadj_rec_local,NTSTEP_BETWEEN_ADJSRC,3,5,5,
// pre_computed_irec_local_index[irec],
// pre_computed_index,
// 0,
// i,j,k)]/kappal;
          // beware: for an acoustic medium, a pressure source would take the negative
          // and divide by the Kappa of the fluid;
          // this would have to be done when constructing the adjoint source.
//
// note: we take the first component of the adj_sourcearrays
// the idea is to have e.g. a pressure source, where all 3 components would be the same
realw stf = adj_sourcearrays[INDEX5(5,5,5,3,i,j,k,0,irec_local)]; // / kappal
atomicAdd(&potential_dot_dot_acoustic[iglob],stf);
//+adj_sourcearrays[INDEX6(nadj_rec_local,NTSTEP_BETWEEN_ADJSRC,3,5,5,
// pre_computed_irec_local_index[irec],pre_computed_index-1,
// 0,i,j,k)] // / kappal
// );
}
}
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(add_sources_ac_sim_2_or_3_cuda,
ADD_SOURCES_AC_SIM_2_OR_3_CUDA)(long* Mesh_pointer,
realw* h_adj_sourcearrays,
int* phase_is_inner,
int* h_ispec_is_inner,
int* h_ispec_is_acoustic,
int* h_ispec_selected_rec,
int* myrank,
int* nrec,
int* time_index,
int* h_islice_selected_rec,
int* nadj_rec_local,
int* NTSTEP_BETWEEN_READ_ADJSRC) {
TRACE("add_sources_ac_sim_2_or_3_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
// checks
if( *nadj_rec_local != mp->nadj_rec_local) exit_on_cuda_error("add_sources_ac_sim_type_2_or_3: nadj_rec_local not equal\n");
// make sure grid dimension is less than 65535 in x dimension
int num_blocks_x = mp->nadj_rec_local;
int num_blocks_y = 1;
while(num_blocks_x > 65535) {
num_blocks_x = (int) ceil(num_blocks_x*0.5f);
num_blocks_y = num_blocks_y*2;
}
dim3 grid(num_blocks_x,num_blocks_y,1);
dim3 threads(5,5,5);
// build slice of adj_sourcearrays because full array is *very* large.
// note: this extracts array values for local adjoint sources at given time step "time_index"
// from large adj_sourcearrays array into h_adj_sourcearrays_slice
int ispec,i,j,k;
int irec_local = 0;
for(int irec = 0; irec < *nrec; irec++) {
if(*myrank == h_islice_selected_rec[irec]) {
irec_local++;
// takes only acoustic sources
ispec = h_ispec_selected_rec[irec]-1;
if( h_ispec_is_acoustic[ispec] ){
if( h_ispec_is_inner[ispec] == *phase_is_inner) {
for(k=0;k<5;k++) {
for(j=0;j<5;j++) {
for(i=0;i<5;i++) {
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,0,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
0,i,j,k)];
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,1,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
1,i,j,k)];
mp->h_adj_sourcearrays_slice[INDEX5(5,5,5,3,i,j,k,2,irec_local-1)]
= h_adj_sourcearrays[INDEX6(mp->nadj_rec_local,
*NTSTEP_BETWEEN_READ_ADJSRC,
3,5,5,
irec_local-1,(*time_index)-1,
2,i,j,k)];
}
}
}
} // phase_is_inner
} // h_ispec_is_acoustic
}
}
// check all local sources were added
if( irec_local != mp->nadj_rec_local) exit_on_error("irec_local not equal to nadj_rec_local\n");
// copies extracted array values onto GPU
print_CUDA_error_if_any(cudaMemcpy(mp->d_adj_sourcearrays, mp->h_adj_sourcearrays_slice,
(mp->nadj_rec_local)*3*NGLL3*sizeof(realw),cudaMemcpyHostToDevice),99099);
// launches cuda kernel for acoustic adjoint sources
add_sources_ac_SIM_TYPE_2_OR_3_kernel<<<grid,threads>>>(mp->d_potential_dot_dot_acoustic,
*nrec,
mp->d_adj_sourcearrays,
mp->d_ibool,
mp->d_ispec_is_inner,
mp->d_ispec_is_acoustic,
mp->d_ispec_selected_rec,
*phase_is_inner,
mp->d_pre_computed_irec,
mp->nadj_rec_local,
mp->d_kappastore);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("add_sources_acoustic_SIM_TYPE_2_OR_3_kernel");
#endif
}
|
4fa8cc3cd4952e7c24122741d09e3a9152dc3597.hip | // !!! This is a file automatically generated by hipify!!!
#include "BigFloat.h"
#ifdef _WIN32
#ifdef __HIPCC__
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define HOST __host__
#define DEVICE __device__
#endif
#endif
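// bit-field view of an IEEE-754 double; initDouble uses it to split a value into sign, exponent and mantissa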
typedef union {
double d;
struct {
uint64_t mantissa : 52;
uint64_t exponent : 11;
uint64_t sign : 1;
} parts;
} double_cast;
HOST DEVICE
int msbPos(uint64_t val) {
if (val) {
int pos = 64;
uint64_t check = ((uint64_t)1) << 63;
while ((val & check) == 0) {
--pos;
check >>= 1;
}
return pos;
}
return 0;
}
HOST DEVICE
BigFloat* normalize(BigFloat *val) {
int msByte = -1;
for (int i = 0; i < BF_SIZE - 1; i++) {
val->data[i+1] += val->data[i] >> 32;
val->data[i] &= 0xFFFFFFFF;
if (val->data[i]) {
msByte = i;
}
}
int MSB = msbPos(val->data[BF_SIZE-1]);
if (MSB == 0) {
if (msByte < 0) {
val->exponent = 0;
val->negative = 0;
return val;
}
for (int i = msByte; i >= 0; --i) {
val->data[i + (BF_SIZE-1 - msByte)] = val->data[i];
val->data[i] = 0;
}
val->exponent -= (BF_SIZE-1 - msByte) * 32;
MSB = msbPos(val->data[BF_SIZE-1]);
}
if (MSB > 32) {
uint64_t toAdd = 0;
for (int i = BF_SIZE-1; i >= 0; --i) {
val->data[i] |= toAdd;
toAdd = (val->data[i] & ((1 << (MSB-32)) - 1)) << 32;
val->data[i] >>= MSB-32;
}
val->exponent += MSB-32;
} else if (MSB < 32) {
uint64_t toAdd = 0;
for (int i = 0; i < BF_SIZE; i++) {
val->data[i] = (val->data[i] << (32-MSB)) | toAdd;
toAdd = val->data[i] >> 32;
val->data[i] &= 0xFFFFFFFF;
}
val->exponent -= 32-MSB;
}
return val;
}
HOST DEVICE
BigFloat* init(BigFloat *val, uint32_t number) {
val->negative = 0;
val->exponent = 0;
val->data[0] = number;
for (int i = 1; i < BF_SIZE; i++) {
val->data[i] = 0;
}
return normalize(val);
}
HOST
BigFloat* initDouble(BigFloat *val, double number) {
double_cast dc;
dc.d = number;
val->negative = dc.parts.sign;
if (dc.parts.exponent) {
val->exponent = dc.parts.exponent - 1023 - 52;
val->data[0] = ((uint64_t)1)<<52 | dc.parts.mantissa;
} else {
val->exponent = 1 - 1023 - 52;
val->data[0] = dc.parts.mantissa;
}
for (int i = 1; i < BF_SIZE; i++) {
val->data[i] = 0;
}
return normalize(val);
}
HOST DEVICE
void assign(BigFloat *dest, BigFloat *src) {
dest->negative = src->negative;
dest->exponent = src->exponent;
for (int i = 0; i < BF_SIZE; i++) {
dest->data[i] = src->data[i];
}
}
HOST DEVICE
int isZero(BigFloat *val) {
return val->data[BF_SIZE-1] == 0;
}
HOST DEVICE
int magCmp(BigFloat *one, BigFloat *two) {
if (one->exponent != two->exponent) {
return one->exponent < two->exponent ? LT : GT;
}
for (int i = BF_SIZE-1; i >= 0; --i) {
if (one->data[i] != two->data[i]) {
return one->data[i] < two->data[i] ? LT : GT;
}
}
return EQ;
}
HOST DEVICE
int cmp(BigFloat *one, BigFloat *two) {
if (one->negative != two->negative) {
return one->negative ? LT : GT;
}
return (one->negative ? -1 : 1) * magCmp(one, two);
}
HOST DEVICE
int base2Cmp(BigFloat *val, int32_t power) {
if (isZero(val)) {
return LT;
}
power -= BF_SIZE*32 - 1;
if (val->exponent < power) {
return LT;
} else if (val->exponent > power) {
return GT;
}
if (val->data[BF_SIZE-1] & 0x7FFFFFFF) {
return GT;
}
for (int i = BF_SIZE-2; i >= 0; i--) {
if (val->data[i]) {
return GT;
}
}
return EQ;
}
HOST DEVICE
BigFloat* shiftL(BigFloat *val, uint64_t amount) {
val->exponent += amount;
return val;
}
HOST DEVICE
BigFloat* shiftR(BigFloat *val, uint64_t amount) {
val->exponent -= amount;
return val;
}
HOST DEVICE
BigFloat* add(BigFloat *one, BigFloat *two, BigFloat *result) {
if (one->negative != two->negative) {
// positive + negative = positive - positive(-1*negative)
// negative + positive = negative - negative(-1*positive)
two->negative ^= 1;
(void)sub(one, two, result); // already normalizes result
two->negative ^= 1;
return result;
}
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
}
result->negative = one->negative;
BigFloat *larger = one;
BigFloat *smaller = two;
if (magCmp(larger, smaller) == LT) {
larger = two;
smaller = one;
}
result->exponent = larger->exponent;
int ndxDiff = 1 + (larger->exponent - smaller->exponent) / 32;
int bitDiff = 32 - (larger->exponent - smaller->exponent) % 32;
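  // align the smaller operand to the larger exponent: each 32-bit limb is shifted left by bitDiff
  // inside its 64-bit slot and the two halves are added to result limbs ndxDiff apart,
  // which amounts to a right shift by the full exponent difference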
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] += larger->data[i];
if (i - ndxDiff + 1 >= 0) {
uint64_t tmp = smaller->data[i] << bitDiff;
result->data[i - ndxDiff + 1] += tmp >> 32;
if (i - ndxDiff >= 0) {
result->data[i - ndxDiff] += tmp & 0xFFFFFFFF;
}
}
}
return normalize(result);
}
HOST DEVICE
void carryToNdx(BigFloat *val, const int ndx) {
int startNdx = ndx;
while (++startNdx < BF_SIZE && val->data[startNdx] == 0) ;
while (startNdx > ndx) {
if (val->data[startNdx] & 0xFFFFFFFF) {
val->data[startNdx-1] |= val->data[startNdx] << 32;
val->data[startNdx] &= 0xFFFFFFFF00000000;
} else {
val->data[startNdx ] -= 0x00000000FFFFFFFF;
val->data[startNdx-1] |= 0xFFFFFFFF00000000;
}
startNdx--;
}
}
HOST DEVICE
BigFloat* sub(BigFloat *one, BigFloat *two, BigFloat *result) {
if (one->negative != two->negative) {
// negative - positive = negative + negative(-1*positive)
// positive - negative = positive + positive(-1*negative)
two->negative ^= 1;
(void)add(one, two, result); // already normalizes result
two->negative ^= 1;
return result;
}
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
}
BigFloat *larger = one;
BigFloat *smaller = two;
result->negative = larger->negative;
if (magCmp(larger, smaller) == LT) {
larger = two;
smaller = one;
result->negative ^= 1;
}
result->exponent = larger->exponent;
int ndxDiff = 1 + (larger->exponent - smaller->exponent) / 32;
int bitDiff = 32 - (larger->exponent - smaller->exponent) % 32;
// Because we carry from larger to smaller, take care of larger first so when
// we carry we don't have to uncarry later
for (int i = BF_SIZE-1; i >= 0; i--) {
result->data[i] += larger->data[i];
if (i - ndxDiff < BF_SIZE) {
uint64_t tmp = smaller->data[i] << bitDiff;
if (i - ndxDiff + 1 < BF_SIZE && i - ndxDiff + 1 >= 0) {
uint64_t upper = tmp >> 32;
if (result->data[i - ndxDiff + 1] < upper) {
carryToNdx(result, i - ndxDiff + 1);
}
result->data[i - ndxDiff + 1] -= upper;
}
if (i - ndxDiff > 0) {
uint64_t lower = tmp & 0xFFFFFFFF;
if (result->data[i - ndxDiff] < lower) {
carryToNdx(result, i - ndxDiff);
}
result->data[i - ndxDiff] -= lower;
}
}
}
return normalize(result);
}
HOST DEVICE
BigFloat* mult(BigFloat *one, BigFloat *two, BigFloat *result, BigFloat *tmp) {
result->negative = one->negative ^ two->negative;
result->exponent = one->exponent + two->exponent + BF_SIZE*32;
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
tmp->data[i] = 0;
}
for (int i = BF_SIZE - 1; i >= 0; --i) {
for (int j = BF_SIZE - 1; j >= 0; --j) {
uint64_t prod = one->data[i] * two->data[j];
int topNdx = i + j - (BF_SIZE - 1);
if (topNdx > 0) {
result->data[topNdx ] += prod >> 32;
result->data[topNdx-1] += prod & 0xFFFFFFFF;
} else {
tmp->data[BF_SIZE + topNdx ] += prod >> 32;
tmp->data[BF_SIZE + topNdx-1] += prod & 0xFFFFFFFF;
}
}
}
for (int i = 0; i < BF_SIZE - 1; i++) {
tmp->data[i+1] += tmp->data[i] >> 32;
}
result->data[0] += tmp->data[BF_SIZE-1] >> 32;
return normalize(result);
}
HOST
std::ostream& operator<<(std::ostream& os, const BigFloat& bf) {
std::ios::fmtflags flags = os.flags();
int width = os.width();
int pos = BF_SIZE;
if (bf.negative) {
os << "- ";
}
os << "0x";
while (pos--)
os << std::noshowbase << std::hex << std::setw(8) << std::setfill('0') << (uint32_t)(bf.data[pos]) << " ";
os.width(width);
os.flags(flags);
os << " x 2^(" << bf.exponent << ")";
return os;
}
HOST
BigFloat& BigFloat::operator=(const double d) {
return *initDouble(this, d);
}
HOST
BigFloat& BigFloat::operator+=(BigFloat bf) {
BigFloat temp;
assign(&temp, this);
return *add(&temp, &bf, this);
}
HOST
BigFloat BigFloat::operator*(const unsigned int i) {
BigFloat result;
BigFloat temp;
BigFloat multiplier;
mult(this, init(&multiplier, i), &result, &temp);
return result;
}
HOST
BigFloat& BigFloat::operator>>=(const int i) {
return *shiftR(this, i);
}
HOST
BigFloat BigFloat::operator>>(const int i) {
BigFloat val;
assign(&val, this);
return *shiftR(&val, i);
}
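// Minimal usage sketch of the helpers above (host side):
//   BigFloat a, b, prod, tmp;
//   init(&a, 7);                 // a = 7
//   initDouble(&b, 0.5);         // b = 0.5
//   mult(&a, &b, &prod, &tmp);   // prod = 3.5; tmp is scratch space
//   std::cout << prod << std::endl;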
| 4fa8cc3cd4952e7c24122741d09e3a9152dc3597.cu | #include "BigFloat.h"
#ifdef _WIN32
#ifdef __CUDACC__
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define HOST __host__
#define DEVICE __device__
#endif
#endif
typedef union {
double d;
struct {
uint64_t mantissa : 52;
uint64_t exponent : 11;
uint64_t sign : 1;
} parts;
} double_cast;
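// returns the 1-based position of the most significant set bit (1..64), or 0 when val == 0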
HOST DEVICE
int msbPos(uint64_t val) {
if (val) {
int pos = 64;
uint64_t check = ((uint64_t)1) << 63;
while ((val & check) == 0) {
--pos;
check >>= 1;
}
return pos;
}
return 0;
}
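// canonical form: propagate inter-limb carries, then shift so the leading bit of the top limb
// sits at msbPos == 32, compensating through the exponent; a zero mantissa resets exponent and sign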
HOST DEVICE
BigFloat* normalize(BigFloat *val) {
int msByte = -1;
for (int i = 0; i < BF_SIZE - 1; i++) {
val->data[i+1] += val->data[i] >> 32;
val->data[i] &= 0xFFFFFFFF;
if (val->data[i]) {
msByte = i;
}
}
int MSB = msbPos(val->data[BF_SIZE-1]);
if (MSB == 0) {
if (msByte < 0) {
val->exponent = 0;
val->negative = 0;
return val;
}
for (int i = msByte; i >= 0; --i) {
val->data[i + (BF_SIZE-1 - msByte)] = val->data[i];
val->data[i] = 0;
}
val->exponent -= (BF_SIZE-1 - msByte) * 32;
MSB = msbPos(val->data[BF_SIZE-1]);
}
if (MSB > 32) {
uint64_t toAdd = 0;
for (int i = BF_SIZE-1; i >= 0; --i) {
val->data[i] |= toAdd;
toAdd = (val->data[i] & ((1 << (MSB-32)) - 1)) << 32;
val->data[i] >>= MSB-32;
}
val->exponent += MSB-32;
} else if (MSB < 32) {
uint64_t toAdd = 0;
for (int i = 0; i < BF_SIZE; i++) {
val->data[i] = (val->data[i] << (32-MSB)) | toAdd;
toAdd = val->data[i] >> 32;
val->data[i] &= 0xFFFFFFFF;
}
val->exponent -= 32-MSB;
}
return val;
}
HOST DEVICE
BigFloat* init(BigFloat *val, uint32_t number) {
val->negative = 0;
val->exponent = 0;
val->data[0] = number;
for (int i = 1; i < BF_SIZE; i++) {
val->data[i] = 0;
}
return normalize(val);
}
HOST
BigFloat* initDouble(BigFloat *val, double number) {
double_cast dc;
dc.d = number;
val->negative = dc.parts.sign;
if (dc.parts.exponent) {
val->exponent = dc.parts.exponent - 1023 - 52;
val->data[0] = ((uint64_t)1)<<52 | dc.parts.mantissa;
} else {
val->exponent = 1 - 1023 - 52;
val->data[0] = dc.parts.mantissa;
}
for (int i = 1; i < BF_SIZE; i++) {
val->data[i] = 0;
}
return normalize(val);
}
HOST DEVICE
void assign(BigFloat *dest, BigFloat *src) {
dest->negative = src->negative;
dest->exponent = src->exponent;
for (int i = 0; i < BF_SIZE; i++) {
dest->data[i] = src->data[i];
}
}
HOST DEVICE
int isZero(BigFloat *val) {
return val->data[BF_SIZE-1] == 0;
}
HOST DEVICE
int magCmp(BigFloat *one, BigFloat *two) {
if (one->exponent != two->exponent) {
return one->exponent < two->exponent ? LT : GT;
}
for (int i = BF_SIZE-1; i >= 0; --i) {
if (one->data[i] != two->data[i]) {
return one->data[i] < two->data[i] ? LT : GT;
}
}
return EQ;
}
HOST DEVICE
int cmp(BigFloat *one, BigFloat *two) {
if (one->negative != two->negative) {
return one->negative ? LT : GT;
}
return (one->negative ? -1 : 1) * magCmp(one, two);
}
HOST DEVICE
int base2Cmp(BigFloat *val, int32_t power) {
if (isZero(val)) {
return LT;
}
power -= BF_SIZE*32 - 1;
if (val->exponent < power) {
return LT;
} else if (val->exponent > power) {
return GT;
}
if (val->data[BF_SIZE-1] & 0x7FFFFFFF) {
return GT;
}
for (int i = BF_SIZE-2; i >= 0; i--) {
if (val->data[i]) {
return GT;
}
}
return EQ;
}
HOST DEVICE
BigFloat* shiftL(BigFloat *val, uint64_t amount) {
val->exponent += amount;
return val;
}
HOST DEVICE
BigFloat* shiftR(BigFloat *val, uint64_t amount) {
val->exponent -= amount;
return val;
}
HOST DEVICE
BigFloat* add(BigFloat *one, BigFloat *two, BigFloat *result) {
if (one->negative != two->negative) {
// positive + negative = positive - positive(-1*negative)
// negative + positive = negative - negative(-1*positive)
two->negative ^= 1;
(void)sub(one, two, result); // already normalizes result
two->negative ^= 1;
return result;
}
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
}
result->negative = one->negative;
BigFloat *larger = one;
BigFloat *smaller = two;
if (magCmp(larger, smaller) == LT) {
larger = two;
smaller = one;
}
result->exponent = larger->exponent;
int ndxDiff = 1 + (larger->exponent - smaller->exponent) / 32;
int bitDiff = 32 - (larger->exponent - smaller->exponent) % 32;
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] += larger->data[i];
if (i - ndxDiff + 1 >= 0) {
uint64_t tmp = smaller->data[i] << bitDiff;
result->data[i - ndxDiff + 1] += tmp >> 32;
if (i - ndxDiff >= 0) {
result->data[i - ndxDiff] += tmp & 0xFFFFFFFF;
}
}
}
return normalize(result);
}
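// borrow helper used by sub(): moves value down from the nearest non-zero higher limb so the
// limb at index ndx can absorb the upcoming subtraction without underflowing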
HOST DEVICE
void carryToNdx(BigFloat *val, const int ndx) {
int startNdx = ndx;
while (++startNdx < BF_SIZE && val->data[startNdx] == 0) ;
while (startNdx > ndx) {
if (val->data[startNdx] & 0xFFFFFFFF) {
val->data[startNdx-1] |= val->data[startNdx] << 32;
val->data[startNdx] &= 0xFFFFFFFF00000000;
} else {
val->data[startNdx ] -= 0x00000000FFFFFFFF;
val->data[startNdx-1] |= 0xFFFFFFFF00000000;
}
startNdx--;
}
}
HOST DEVICE
BigFloat* sub(BigFloat *one, BigFloat *two, BigFloat *result) {
if (one->negative != two->negative) {
// negative - positive = negative + negative(-1*positive)
// positive - negative = positive + positive(-1*negative)
two->negative ^= 1;
(void)add(one, two, result); // already normalizes result
two->negative ^= 1;
return result;
}
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
}
BigFloat *larger = one;
BigFloat *smaller = two;
result->negative = larger->negative;
if (magCmp(larger, smaller) == LT) {
larger = two;
smaller = one;
result->negative ^= 1;
}
result->exponent = larger->exponent;
int ndxDiff = 1 + (larger->exponent - smaller->exponent) / 32;
int bitDiff = 32 - (larger->exponent - smaller->exponent) % 32;
// Because we carry from larger to smaller, take care of larger first so when
// we carry we don't have to uncarry later
for (int i = BF_SIZE-1; i >= 0; i--) {
result->data[i] += larger->data[i];
if (i - ndxDiff < BF_SIZE) {
uint64_t tmp = smaller->data[i] << bitDiff;
if (i - ndxDiff + 1 < BF_SIZE && i - ndxDiff + 1 >= 0) {
uint64_t upper = tmp >> 32;
if (result->data[i - ndxDiff + 1] < upper) {
carryToNdx(result, i - ndxDiff + 1);
}
result->data[i - ndxDiff + 1] -= upper;
}
if (i - ndxDiff > 0) {
uint64_t lower = tmp & 0xFFFFFFFF;
if (result->data[i - ndxDiff] < lower) {
carryToNdx(result, i - ndxDiff);
}
result->data[i - ndxDiff] -= lower;
}
}
}
return normalize(result);
}
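// schoolbook multiplication of the 32-bit limbs; partial products that fall below the kept
// BF_SIZE limbs accumulate in tmp, and only their carry out of tmp's top limb is folded back
// into result->data[0]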
HOST DEVICE
BigFloat* mult(BigFloat *one, BigFloat *two, BigFloat *result, BigFloat *tmp) {
result->negative = one->negative ^ two->negative;
result->exponent = one->exponent + two->exponent + BF_SIZE*32;
for (int i = 0; i < BF_SIZE; i++) {
result->data[i] = 0;
tmp->data[i] = 0;
}
for (int i = BF_SIZE - 1; i >= 0; --i) {
for (int j = BF_SIZE - 1; j >= 0; --j) {
uint64_t prod = one->data[i] * two->data[j];
int topNdx = i + j - (BF_SIZE - 1);
if (topNdx > 0) {
result->data[topNdx ] += prod >> 32;
result->data[topNdx-1] += prod & 0xFFFFFFFF;
} else {
tmp->data[BF_SIZE + topNdx ] += prod >> 32;
tmp->data[BF_SIZE + topNdx-1] += prod & 0xFFFFFFFF;
}
}
}
for (int i = 0; i < BF_SIZE - 1; i++) {
tmp->data[i+1] += tmp->data[i] >> 32;
}
result->data[0] += tmp->data[BF_SIZE-1] >> 32;
return normalize(result);
}
HOST
std::ostream& operator<<(std::ostream& os, const BigFloat& bf) {
std::ios::fmtflags flags = os.flags();
int width = os.width();
int pos = BF_SIZE;
if (bf.negative) {
os << "- ";
}
os << "0x";
while (pos--)
os << std::noshowbase << std::hex << std::setw(8) << std::setfill('0') << (uint32_t)(bf.data[pos]) << " ";
os.width(width);
os.flags(flags);
os << " x 2^(" << bf.exponent << ")";
return os;
}
HOST
BigFloat& BigFloat::operator=(const double d) {
return *initDouble(this, d);
}
HOST
BigFloat& BigFloat::operator+=(BigFloat bf) {
BigFloat temp;
assign(&temp, this);
return *add(&temp, &bf, this);
}
HOST
BigFloat BigFloat::operator*(const unsigned int i) {
BigFloat result;
BigFloat temp;
BigFloat multiplier;
mult(this, init(&multiplier, i), &result, &temp);
return result;
}
HOST
BigFloat& BigFloat::operator>>=(const int i) {
return *shiftR(this, i);
}
HOST
BigFloat BigFloat::operator>>(const int i) {
BigFloat val;
assign(&val, this);
return *shiftR(&val, i);
}
|
6a203c992a69470ea80f43293a75a3e2fb1ff8b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
#include "mesh.h"
__global__ void kernel()
{
printf("Kernel Launched");
}
extern "C" void RunTest()
{
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf("cuda.cu launched \n");
std::cout<<"cuda.cu launched"<<std::endl;
}
| 6a203c992a69470ea80f43293a75a3e2fb1ff8b2.cu | #include<stdio.h>
#include<iostream>
#include "mesh.h"
__global__ void kernel()
{
printf("Kernel Launched");
}
extern "C" void RunTest()
{
kernel<<<1,1>>>();
printf("cuda.cu launched \n");
std::cout<<"cuda.cu launched"<<std::endl;
}
|
c0727495883fd1d4c5436eb86bb710e3d6e67bc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
using namespace cv;
#define HEIGHT 512 /* original image height */
#define WIDTH 512 /* original image width */
#define MAX_COLOR 255
#define GRADIENT_DIVISOR 2
__global__ void Brite_Kernel(uchar* d_frame_out, uchar* d_frame_in,
int height, int width, int bri) /* Original height and width */
{
#ifdef ORIGINAL_CODE
int x = threadIdx.x + blockIdx.x * blockDim.x;
if (x > 0 && x < width) {
for (int y = 0; y < height; y++)
{
for (int z = 0; z < 3; z++) {
if (x >= width / 2) {
if (d_frame_in[(y * width + x) * 3 + z] + bri > 255) {
d_frame_out[(y * width + x) * 3 + z] = 255;
}
else {
d_frame_out[(y * width + x) * 3 + z] = d_frame_in[(y * width + x) * 3 + z] + bri;
}
}
else {
d_frame_out[(y * width + x) * 3 + z] = d_frame_in[(y * width + x) * 3 + z];
}
}
}
}
#else
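    // one thread per image row: add a horizontal brightness gradient of j / GRADIENT_DIVISOR
    // to every channel, clamped to MAX_COLOR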
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= HEIGHT)
{
return;
}
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < 3; ++k)
{
int nColor = d_frame_in[(i * width + j) * 3 + k] + j / GRADIENT_DIVISOR;
d_frame_out[(i * width + j) * 3 + k] = (nColor < MAX_COLOR) ? nColor : MAX_COLOR;
}
}
#endif
}
int main()
{
Mat img1 = imread("objects.jpg");
int img1_size = img1.rows * img1.step;
uchar* frame1 = (uchar*)calloc(img1_size, sizeof(uchar));
/* Load Image RGB Values */
for (int i = 0; i < img1.size().height; i++)
{
for (int j = 0; j < img1.size().width; j++)
{
for (int z = 0; z < 3; z++) {
frame1[(i * img1.size().width + j) * 3 + z] = img1.at<Vec3b>(i, j)[z];
}
}
}
uchar* d_frame_in;
uchar* d_frame_out;
hipMalloc((void**)&d_frame_in, sizeof(uchar) * img1_size);
hipMalloc((void**)&d_frame_out, sizeof(uchar) * img1_size);
hipMemcpy(d_frame_in, frame1, sizeof(uchar) * img1_size, hipMemcpyHostToDevice);
///* Image shift */
Brite_Kernel << <16, 64 >> > (d_frame_out, d_frame_in,
HEIGHT, WIDTH, 100 /* Original height and width */);
hipMemcpy(frame1, d_frame_out, sizeof(uchar) * img1_size, hipMemcpyDeviceToHost);
/* Load shift Image RGB */
for (int i = 0; i < img1.size().height; i++)
{
for (int j = 0; j < img1.size().width; j++)
{
for (int z = 0; z < 3; z++) {
img1.at<Vec3b>(i, j)[z] = frame1[(i * img1.size().width + j) * 3 + z];
}
}
}
// create a window
namedWindow("mainWin", WINDOW_AUTOSIZE);
moveWindow("mainWin", 100, 100);
// show the image
imshow("mainWin", img1);
// wait for a key
waitKey(0);
return 0;
}
| c0727495883fd1d4c5436eb86bb710e3d6e67bc4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
using namespace cv;
#define HEIGHT 512 /* original image height */
#define WIDTH 512 /* original image width */
#define MAX_COLOR 255
#define GRADIENT_DIVISOR 2
__global__ void Brite_Kernel(uchar* d_frame_out, uchar* d_frame_in,
int height, int width, int bri) /* Original height and width */
{
#ifdef ORIGINAL_CODE
int x = threadIdx.x + blockIdx.x * blockDim.x;
if (x > 0 && x < width) {
for (int y = 0; y < height; y++)
{
for (int z = 0; z < 3; z++) {
if (x >= width / 2) {
if (d_frame_in[(y * width + x) * 3 + z] + bri > 255) {
d_frame_out[(y * width + x) * 3 + z] = 255;
}
else {
d_frame_out[(y * width + x) * 3 + z] = d_frame_in[(y * width + x) * 3 + z] + bri;
}
}
else {
d_frame_out[(y * width + x) * 3 + z] = d_frame_in[(y * width + x) * 3 + z];
}
}
}
}
#else
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= HEIGHT)
{
return;
}
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < 3; ++k)
{
int nColor = d_frame_in[(i * width + j) * 3 + k] + j / GRADIENT_DIVISOR;
d_frame_out[(i * width + j) * 3 + k] = (nColor < MAX_COLOR) ? nColor : MAX_COLOR;
}
}
#endif
}
int main()
{
Mat img1 = imread("objects.jpg");
int img1_size = img1.rows * img1.step;
uchar* frame1 = (uchar*)calloc(img1_size, sizeof(uchar));
/* Load Image RGB Values */
for (int i = 0; i < img1.size().height; i++)
{
for (int j = 0; j < img1.size().width; j++)
{
for (int z = 0; z < 3; z++) {
frame1[(i * img1.size().width + j) * 3 + z] = img1.at<Vec3b>(i, j)[z];
}
}
}
uchar* d_frame_in;
uchar* d_frame_out;
cudaMalloc((void**)&d_frame_in, sizeof(uchar) * img1_size);
cudaMalloc((void**)&d_frame_out, sizeof(uchar) * img1_size);
cudaMemcpy(d_frame_in, frame1, sizeof(uchar) * img1_size, cudaMemcpyHostToDevice);
///* Image shift */
Brite_Kernel << <16, 64 >> > (d_frame_out, d_frame_in,
HEIGHT, WIDTH, 100 /* Original height and width */);
cudaMemcpy(frame1, d_frame_out, sizeof(uchar) * img1_size, cudaMemcpyDeviceToHost);
/* Load shift Image RGB */
for (int i = 0; i < img1.size().height; i++)
{
for (int j = 0; j < img1.size().width; j++)
{
for (int z = 0; z < 3; z++) {
img1.at<Vec3b>(i, j)[z] = frame1[(i * img1.size().width + j) * 3 + z];
}
}
}
// create a window
namedWindow("mainWin", WINDOW_AUTOSIZE);
moveWindow("mainWin", 100, 100);
// show the image
imshow("mainWin", img1);
// wait for a key
waitKey(0);
return 0;
}
|
d9abfa99fb20e65094f74516b9e4aabe13e9bfcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "SRSystemMatrix.h"
#include "cudalbfgs_error_checking.h"
#include <CudaLBFGS/timer.h>
using namespace std;
namespace gpu_SRSystemMatrix
{
__global__ void composeSingleSystem(const size_t offset, const float *H,
const size_t lowresWidth, const size_t lowresHeight,
const size_t highresWidth, const size_t highresHeight,
const float psfWidth, const int pixelRadius,
float *systemMatrixVals, int *systemMatrixCols,
int *systemMatrixRows);
}
SRSystemMatrix::SRSystemMatrix(const vector<MotionParams> &motionParams, const float psfWidth,
const LRImageStack &lrImages, const SRImage &srImage, const GPUHandles &gpuhandles,
const float radiusScale)
: m_height(lrImages.getNumImagePixels() * lrImages.getNumImages())
, m_width(srImage.getNumPixels())
, m_psfWidth(psfWidth)
, m_radiusScale(radiusScale)
, m_motionParams(motionParams)
, m_lrImages(lrImages)
, m_srImage(srImage)
, m_gpuHandles(gpuhandles)
, m_d_values_ccs(0)
, m_d_colPointers_ccs(0)
, m_d_rowIndices_ccs(0)
{
compose();
}
SRSystemMatrix::~SRSystemMatrix()
{
CudaSafeCall( hipFree(m_d_values) );
CudaSafeCall( hipFree(m_d_rowPointers) );
CudaSafeCall( hipFree(m_d_colIndices) );
#ifdef SUPERRES_STORE_TRANSPOSE
CudaSafeCall( hipFree(m_d_values_ccs) );
CudaSafeCall( hipFree(m_d_rowIndices_ccs) );
CudaSafeCall( hipFree(m_d_colPointers_ccs) );
#endif
}
void SRSystemMatrix::compose()
{
#ifdef SUPERRES_TIMING
timer composeTimer("composeSystemMatrix");
composeTimer.start();
#endif
const float zoom = float(m_srImage.getWidth()) / float(m_lrImages.getImageWidth());
const float maxPsfRadius = m_radiusScale * zoom * m_psfWidth;
int pixelRadius = (int)floor(maxPsfRadius + 0.5f);
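	// the PSF footprint is truncated at maxPsfRadius high-res pixels; pixelRadius is that cutoff rounded to the nearest integer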
if (2 * pixelRadius + 1 >= ::min(m_srImage.getWidth(), m_srImage.getHeight()))
{
cout << "WARNING: With the chosen settings for radius scale, zoom and psfWidth," <<
"the point spread function covers the whole SR image." << endl;
pixelRadius = (::min(m_srImage.getWidth(), m_srImage.getHeight()) - 1) / 2;
}
// Allocate memory for CRS (and CCS)
// The number of non-zero elements per matrix row is (2 * pixelRadius + 1)^2
size_t numValElements = (2 * pixelRadius + 1) * (2 * pixelRadius + 1) * m_height;
CudaSafeCall( hipMalloc((void**) &m_d_values, numValElements * sizeof(float)) );
CudaSafeCall( hipMalloc((void**) &m_d_colIndices, numValElements * sizeof(int) ) );
CudaSafeCall( hipMalloc((void**) &m_d_rowPointers, (m_height + 1) * sizeof(int) ) );
hipMemset(m_d_colIndices, 0, numValElements * sizeof(int));
#ifdef SUPERRES_STORE_TRANSPOSE
CudaSafeCall( hipMalloc((void**) &m_d_values_ccs, numValElements * sizeof(float)) );
CudaSafeCall( hipMalloc((void**) &m_d_rowIndices_ccs, numValElements * sizeof(int) ) );
CudaSafeCall( hipMalloc((void**) &m_d_colPointers_ccs, (m_width + 1) * sizeof(int) ) );
#endif
size_t offset = 0;
// Copy motion parameters to GPU
float *d_motionparams;
CudaSafeCall( hipMalloc((void**) &d_motionparams, m_motionParams.size() * 9 * sizeof(float)) );
CudaSafeCall( hipMemcpy(d_motionparams, &m_motionParams[0],
m_motionParams.size() * 9 * sizeof(float), hipMemcpyHostToDevice) );
// Compose the equation systems for each low-res image
for (size_t i = 0; i < m_lrImages.getNumImages(); ++i)
{
composeSingleSystem(offset, i, pixelRadius, d_motionparams);
offset += m_lrImages.getNumImagePixels();
}
hipDeviceSynchronize();
// The last element of the CRS row pointer is the number of non-zero elements
// The other entries of the row pointer array are set in the kernel
CudaSafeCall( hipMemcpy(m_d_rowPointers + m_height, &numValElements,
sizeof(int), hipMemcpyHostToDevice) );
CudaSafeCall( hipFree(d_motionparams) );
#ifdef SUPERRES_STORE_TRANSPOSE
// Create CCS structure from CRS structure
CusparseSafeCall( hipsparseScsr2csc(m_gpuHandles.cusparseHandle, m_height, m_width, m_d_values,
m_d_rowPointers, m_d_colIndices, m_d_values_ccs, m_d_rowIndices_ccs,
m_d_colPointers_ccs, 1, HIPSPARSE_INDEX_BASE_ZERO) );
#endif
#ifdef SUPERRES_TIMING
composeTimer.stop();
composeTimer.saveMeasurement();
#endif
}
void SRSystemMatrix::composeSingleSystem(const size_t offset, const size_t motionIdx, const int pixelRadius,
float *d_motionparams)
{
size_t height = m_lrImages.getNumImagePixels();
dim3 blockDim(512);
dim3 gridDim = dim3(height % blockDim.x == 0 ? height / blockDim.x : (height / blockDim.x) + 1);
hipLaunchKernelGGL(( gpu_SRSystemMatrix::composeSingleSystem), dim3(gridDim), dim3(blockDim), 0, 0, offset, &d_motionparams[9 * motionIdx],
m_lrImages.getImageWidth(), m_lrImages.getImageHeight(),
m_srImage.getWidth(), m_srImage.getHeight(),
m_psfWidth, pixelRadius,
m_d_values, m_d_colIndices, m_d_rowPointers);
CudaCheckError();
}
namespace gpu_SRSystemMatrix
{
__device__ int roundToInt(float val)
{
return (int)floor(val + 0.5f);
}
__global__ void composeSingleSystem(const size_t offset, const float *H,
const size_t lowresWidth, const size_t lowresHeight,
const size_t highresWidth, const size_t highresHeight,
const float psfWidth, const int pixelRadius,
float *systemMatrixVals, int *systemMatrixCols,
int *systemMatrixRows)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
const size_t lowresPixels = lowresWidth * lowresHeight;
if (idx >= lowresPixels)
return;
// Coordinates of this thread in the low-res image
size_t x = idx % lowresWidth;
size_t y = idx / lowresWidth;
// Row that this thread writes in the full system matrix
size_t r = idx + offset;
// Transform pixel coordinates from the LR grid to the desired HR grid
float hrx, hry;
float zoom = float(highresWidth) / float(lowresWidth);
hrx = (H[0] * x + H[1] * y + H[2]) * zoom;
hry = (H[3] * x + H[4] * y + H[5]) * zoom;
float weightSum = 0.0f;
const size_t maxRowElems = (2 * pixelRadius + 1) * (2 * pixelRadius + 1);
size_t offsetCRS = 0;
size_t offsetRows = maxRowElems * r;
// Iterate over the neighborhood defined by the width of the psf
for (int offsetY = -pixelRadius; offsetY <= pixelRadius; ++offsetY)
{
const int ny = roundToInt(hry + offsetY);
if (ny < 0 || ny >= highresHeight)
continue;
for (int offsetX = -pixelRadius; offsetX <= pixelRadius; ++offsetX)
{
const int nx = roundToInt(hrx + offsetX);
if (nx < 0 || nx >= highresWidth)
continue;
const float dx = hrx - float(nx);
const float dy = hry - float(ny);
// Compute influence of current high-res pixel for
// this thread's low-res pixel
float dist = dx*dx*H[0]*H[0] + dy*dy*H[4]*H[4] +
dx*dy*H[0]*H[3] + dx*dy*H[1]*H[4];
float weight = expf(-dist / (2.0f * zoom * zoom * psfWidth * psfWidth));
const size_t valIdx = offsetRows + offsetCRS;
systemMatrixVals[valIdx] = weight;
systemMatrixCols[valIdx] = ny * highresWidth + nx;
weightSum += weight;
++offsetCRS;
}
}
if (weightSum > 0.0f)
{
// Normalize row sums
for (size_t i = 0; i < offsetCRS; ++i)
{
systemMatrixVals[offsetRows + i] /= weightSum;
}
}
// If we have saved less than maxRowElems elements,
// we have to pad the CRS structure with 0 entries
// to make sure it is valid
if (offsetCRS == 0)
{
systemMatrixVals[offsetRows] = 0.0f;
systemMatrixCols[offsetRows] = 0;
++offsetCRS;
}
bool copy = false;
// Try adding elements after the last saved entry
while (offsetCRS < maxRowElems)
{
const size_t idx = offsetRows + offsetCRS;
if (systemMatrixCols[idx - 1] + 1 >= highresWidth * highresHeight)
{
copy = true;
break;
}
systemMatrixVals[idx] = 0.0f;
systemMatrixCols[idx] = systemMatrixCols[idx - 1] + 1;
offsetCRS++;
}
// If there isn't enough space after the last saved
// entry, add padding before first entry
if (copy)
{
for (int idx = offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixVals[offsetRows + idx];
systemMatrixCols[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixCols[offsetRows + idx];
}
for (int idx = maxRowElems - offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + idx] = 0.0f;
systemMatrixCols[offsetRows + idx] = systemMatrixCols[offsetRows + idx + 1] - 1;
}
}
systemMatrixRows[r] = r * maxRowElems;
}
}
| d9abfa99fb20e65094f74516b9e4aabe13e9bfcb.cu | /**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "SRSystemMatrix.h"
#include "cudalbfgs_error_checking.h"
#include <CudaLBFGS/timer.h>
using namespace std;
namespace gpu_SRSystemMatrix
{
__global__ void composeSingleSystem(const size_t offset, const float *H,
const size_t lowresWidth, const size_t lowresHeight,
const size_t highresWidth, const size_t highresHeight,
const float psfWidth, const int pixelRadius,
float *systemMatrixVals, int *systemMatrixCols,
int *systemMatrixRows);
}
SRSystemMatrix::SRSystemMatrix(const vector<MotionParams> &motionParams, const float psfWidth,
const LRImageStack &lrImages, const SRImage &srImage, const GPUHandles &gpuhandles,
const float radiusScale)
: m_height(lrImages.getNumImagePixels() * lrImages.getNumImages())
, m_width(srImage.getNumPixels())
, m_psfWidth(psfWidth)
, m_radiusScale(radiusScale)
, m_motionParams(motionParams)
, m_lrImages(lrImages)
, m_srImage(srImage)
, m_gpuHandles(gpuhandles)
, m_d_values_ccs(0)
, m_d_colPointers_ccs(0)
, m_d_rowIndices_ccs(0)
{
compose();
}
SRSystemMatrix::~SRSystemMatrix()
{
CudaSafeCall( cudaFree(m_d_values) );
CudaSafeCall( cudaFree(m_d_rowPointers) );
CudaSafeCall( cudaFree(m_d_colIndices) );
#ifdef SUPERRES_STORE_TRANSPOSE
CudaSafeCall( cudaFree(m_d_values_ccs) );
CudaSafeCall( cudaFree(m_d_rowIndices_ccs) );
CudaSafeCall( cudaFree(m_d_colPointers_ccs) );
#endif
}
void SRSystemMatrix::compose()
{
#ifdef SUPERRES_TIMING
timer composeTimer("composeSystemMatrix");
composeTimer.start();
#endif
const float zoom = float(m_srImage.getWidth()) / float(m_lrImages.getImageWidth());
const float maxPsfRadius = m_radiusScale * zoom * m_psfWidth;
int pixelRadius = (int)floor(maxPsfRadius + 0.5f);
if (2 * pixelRadius + 1 >= std::min(m_srImage.getWidth(), m_srImage.getHeight()))
{
cout << "WARNING: With the chosen settings for radius scale, zoom and psfWidth," <<
"the point spread function covers the whole SR image." << endl;
pixelRadius = (std::min(m_srImage.getWidth(), m_srImage.getHeight()) - 1) / 2;
}
// Allocate memory for CRS (and CCS)
// The number of non-zero elements per matrix row is (2 * pixelRadius + 1)^2
size_t numValElements = (2 * pixelRadius + 1) * (2 * pixelRadius + 1) * m_height;
CudaSafeCall( cudaMalloc((void**) &m_d_values, numValElements * sizeof(float)) );
CudaSafeCall( cudaMalloc((void**) &m_d_colIndices, numValElements * sizeof(int) ) );
CudaSafeCall( cudaMalloc((void**) &m_d_rowPointers, (m_height + 1) * sizeof(int) ) );
cudaMemset(m_d_colIndices, 0, numValElements * sizeof(int));
#ifdef SUPERRES_STORE_TRANSPOSE
CudaSafeCall( cudaMalloc((void**) &m_d_values_ccs, numValElements * sizeof(float)) );
CudaSafeCall( cudaMalloc((void**) &m_d_rowIndices_ccs, numValElements * sizeof(int) ) );
CudaSafeCall( cudaMalloc((void**) &m_d_colPointers_ccs, (m_width + 1) * sizeof(int) ) );
#endif
size_t offset = 0;
// Copy motion parameters to GPU
float *d_motionparams;
CudaSafeCall( cudaMalloc((void**) &d_motionparams, m_motionParams.size() * 9 * sizeof(float)) );
CudaSafeCall( cudaMemcpy(d_motionparams, &m_motionParams[0],
m_motionParams.size() * 9 * sizeof(float), cudaMemcpyHostToDevice) );
// Compose the equation systems for each low-res image
for (size_t i = 0; i < m_lrImages.getNumImages(); ++i)
{
composeSingleSystem(offset, i, pixelRadius, d_motionparams);
offset += m_lrImages.getNumImagePixels();
}
cudaDeviceSynchronize();
// The last element of the CRS row pointer is the number of non-zero elements
// The other entries of the row pointer array are set in the kernel
CudaSafeCall( cudaMemcpy(m_d_rowPointers + m_height, &numValElements,
sizeof(int), cudaMemcpyHostToDevice) );
CudaSafeCall( cudaFree(d_motionparams) );
#ifdef SUPERRES_STORE_TRANSPOSE
// Create CCS structure from CRS structure
CusparseSafeCall( cusparseScsr2csc(m_gpuHandles.cusparseHandle, m_height, m_width, m_d_values,
m_d_rowPointers, m_d_colIndices, m_d_values_ccs, m_d_rowIndices_ccs,
m_d_colPointers_ccs, 1, CUSPARSE_INDEX_BASE_ZERO) );
#endif
#ifdef SUPERRES_TIMING
composeTimer.stop();
composeTimer.saveMeasurement();
#endif
}
void SRSystemMatrix::composeSingleSystem(const size_t offset, const size_t motionIdx, const int pixelRadius,
float *d_motionparams)
{
size_t height = m_lrImages.getNumImagePixels();
dim3 blockDim(512);
dim3 gridDim = dim3(height % blockDim.x == 0 ? height / blockDim.x : (height / blockDim.x) + 1);
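	// one thread per low-res pixel of this image; threads in the rounded-up remainder return immediately inside the kernel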
gpu_SRSystemMatrix::composeSingleSystem<<<gridDim, blockDim>>>(offset, &d_motionparams[9 * motionIdx],
m_lrImages.getImageWidth(), m_lrImages.getImageHeight(),
m_srImage.getWidth(), m_srImage.getHeight(),
m_psfWidth, pixelRadius,
m_d_values, m_d_colIndices, m_d_rowPointers);
CudaCheckError();
}
namespace gpu_SRSystemMatrix
{
__device__ int roundToInt(float val)
{
return (int)floor(val + 0.5f);
}
__global__ void composeSingleSystem(const size_t offset, const float *H,
const size_t lowresWidth, const size_t lowresHeight,
const size_t highresWidth, const size_t highresHeight,
const float psfWidth, const int pixelRadius,
float *systemMatrixVals, int *systemMatrixCols,
int *systemMatrixRows)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
const size_t lowresPixels = lowresWidth * lowresHeight;
if (idx >= lowresPixels)
return;
// Coordinates of this thread in the low-res image
size_t x = idx % lowresWidth;
size_t y = idx / lowresWidth;
// Row that this thread writes in the full system matrix
size_t r = idx + offset;
// Transform pixel coordinates from the LR grid to the desired HR grid
float hrx, hry;
float zoom = float(highresWidth) / float(lowresWidth);
hrx = (H[0] * x + H[1] * y + H[2]) * zoom;
hry = (H[3] * x + H[4] * y + H[5]) * zoom;
float weightSum = 0.0f;
const size_t maxRowElems = (2 * pixelRadius + 1) * (2 * pixelRadius + 1);
size_t offsetCRS = 0;
size_t offsetRows = maxRowElems * r;
// Iterate over the neighborhood defined by the width of the psf
for (int offsetY = -pixelRadius; offsetY <= pixelRadius; ++offsetY)
{
const int ny = roundToInt(hry + offsetY);
if (ny < 0 || ny >= highresHeight)
continue;
for (int offsetX = -pixelRadius; offsetX <= pixelRadius; ++offsetX)
{
const int nx = roundToInt(hrx + offsetX);
if (nx < 0 || nx >= highresWidth)
continue;
const float dx = hrx - float(nx);
const float dy = hry - float(ny);
// Compute influence of current high-res pixel for
// this thread's low-res pixel
float dist = dx*dx*H[0]*H[0] + dy*dy*H[4]*H[4] +
dx*dy*H[0]*H[3] + dx*dy*H[1]*H[4];
float weight = expf(-dist / (2.0f * zoom * zoom * psfWidth * psfWidth));
const size_t valIdx = offsetRows + offsetCRS;
systemMatrixVals[valIdx] = weight;
systemMatrixCols[valIdx] = ny * highresWidth + nx;
weightSum += weight;
++offsetCRS;
}
}
if (weightSum > 0.0f)
{
// Normalize row sums
for (size_t i = 0; i < offsetCRS; ++i)
{
systemMatrixVals[offsetRows + i] /= weightSum;
}
}
// If we have saved less than maxRowElems elements,
// we have to pad the CRS structure with 0 entries
// to make sure it is valid
if (offsetCRS == 0)
{
systemMatrixVals[offsetRows] = 0.0f;
systemMatrixCols[offsetRows] = 0;
++offsetCRS;
}
bool copy = false;
// Try adding elements after the last saved entry
while (offsetCRS < maxRowElems)
{
const size_t idx = offsetRows + offsetCRS;
if (systemMatrixCols[idx - 1] + 1 >= highresWidth * highresHeight)
{
copy = true;
break;
}
systemMatrixVals[idx] = 0.0f;
systemMatrixCols[idx] = systemMatrixCols[idx - 1] + 1;
offsetCRS++;
}
// If there isn't enough space after the last saved
// entry, add padding before first entry
if (copy)
{
for (int idx = offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixVals[offsetRows + idx];
systemMatrixCols[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixCols[offsetRows + idx];
}
for (int idx = maxRowElems - offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + idx] = 0.0f;
systemMatrixCols[offsetRows + idx] = systemMatrixCols[offsetRows + idx + 1] - 1;
}
}
systemMatrixRows[r] = r * maxRowElems;
}
}
|
9224d02241d9a855140eb4a3b13cfdd4f77cf1ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
This function calculates the first-order accurate upwind advection of the magnetic field
'mag' by velocity 'velGrid' and stores the timestepped value in 'Bw'.
*/
__global__ void cukern_magnetWstep_uniformX(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int nx);
__global__ void cukern_magnetWstep_uniformY(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims);
__global__ void cukern_magnetWstep_uniformZ(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims);
#define BLOCKDIMA 18
#define BLOCKDIMAM2 16
#define BLOCKDIMB 8
#define BLOCKLEN 128
#define BLOCKLENP4 132
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=4) || (nlhs != 2)) mexErrMsgTxt("Wrong number of arguments: need [magW,velFlow] = cudaMagWflux(mag, velgrid, lambda, dir)\n");
CHECK_CUDA_ERROR("entering cudaMagW");
// Get source array info and create destination arrays
MGArray src[2];
int worked = MGA_accessMatlabArrays(prhs, 0, 1, src);
MGArray *dst = MGA_createReturnedArrays(plhs, 2, src);
// Establish launch dimensions & a few other parameters
int fluxDirection = (int)*mxGetPr(prhs[3]);
double lambda = *mxGetPr(prhs[2]);
int3 arraySize;
arraySize.x = src->dim[0];
arraySize.y = src->dim[1];
arraySize.z = src->dim[2];
dim3 blocksize, gridsize;
switch(fluxDirection) {
    case 1: // X direction flux. This is "privileged" in that the shift and natural memory load directions align
blocksize.x = BLOCKLEN+4; blocksize.y = blocksize.z = 1;
gridsize.x = arraySize.y;
gridsize.y = arraySize.z;
hipLaunchKernelGGL(( cukern_magnetWstep_uniformX), dim3(gridsize) , dim3(blocksize), 0, 0, src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize.x);
break;
case 2: // Y direction flux: u = y, v = x, w = z
blocksize.x = BLOCKDIMB; blocksize.y = BLOCKDIMAM2;
gridsize.x = arraySize.x / blocksize.x; gridsize.x += 1*(blocksize.x*gridsize.x < arraySize.x);
gridsize.y = arraySize.y / blocksize.y; gridsize.y += 1*(blocksize.y*gridsize.y < arraySize.y);
blocksize.y = BLOCKDIMA;
hipLaunchKernelGGL(( cukern_magnetWstep_uniformY), dim3(gridsize) , dim3(blocksize), 0, 0, src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize);
break;
case 3: // Z direction flux: u = z, v = x, w = y;
blocksize.x = BLOCKDIMB; blocksize.y = BLOCKDIMAM2;
gridsize.x = arraySize.x / blocksize.x; gridsize.x += 1*(blocksize.x*gridsize.x < arraySize.x);
gridsize.y = arraySize.z / blocksize.y; gridsize.y += 1*(blocksize.y*gridsize.y < arraySize.z);
blocksize.y = BLOCKDIMA;
hipLaunchKernelGGL(( cukern_magnetWstep_uniformZ), dim3(gridsize) , dim3(blocksize), 0, 0, src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize);
break;
}
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, src, fluxDirection, "magnetic W step");
free(dst);
}
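// first-order upwind advection along X: each block sweeps one (y,z) row of length nx,
// marching BLOCKLEN cells per pass (plus a small halo) and exchanging interface fluxes through shared memory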
__global__ void cukern_magnetWstep_uniformX(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int nx)
{
double v;
double b;
double bv;
double locVelFlow;
__shared__ double flux[BLOCKLENP4];
/* Step 0 - obligatory annoying setup stuff (ASS) */
int I0 = nx * (blockIdx.x + gridDim.x * blockIdx.y);
int Xindex = (threadIdx.x-2);
int Xtrack = Xindex;
Xindex += nx * (threadIdx.x < 2);
int x;
bool doIflux = (threadIdx.x > 1) && (threadIdx.x < BLOCKLEN+2);
while(Xtrack < nx + 2) {
x = I0 + (Xindex % nx) ;
v = velGrid[x];
b = mag[x];
// First step - calculate velocityflow
flux[threadIdx.x] = v;
__syncthreads();
locVelFlow = (flux[threadIdx.x] + flux[(threadIdx.x + 1)%BLOCKLENP4]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
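    // locVelFlow == 1 flags a leftward interface velocity (v_i + v_{i+1} < 0), in which case
    // the upwind flux is taken from cell i+1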
__syncthreads();
// Second step - calculate flux
bv = b * v;
flux[threadIdx.x] = bv;
__syncthreads();
bv = bv * (1 - locVelFlow) + flux[(threadIdx.x + 1)%BLOCKLENP4] * locVelFlow;
__syncthreads();
flux[threadIdx.x] = bv;
__syncthreads();
// Third step - Perform flux and write to output array
if( doIflux && (Xindex < nx) ) {
bW[x] = b - lambda * ( bv - flux[threadIdx.x - 1] );
velFlow[x] = locVelFlow;
}
Xindex += BLOCKLEN;
Xtrack += BLOCKLEN;
__syncthreads();
}
}
__global__ void cukern_magnetWstep_uniformY(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims)
{
double v, b, locVelFlow;
__shared__ double tile[BLOCKDIMB][BLOCKDIMA];
__shared__ double flux[BLOCKDIMB][BLOCKDIMA];
// Dimensions into the array
int myx = blockIdx.x*BLOCKDIMB + threadIdx.x;
int myy = blockIdx.y*BLOCKDIMAM2 + threadIdx.y - 1;
if((myx >= dims.x) || (myy > dims.y)) return; // we keep an extra Y thread for the finite diff.
bool IWrite = (threadIdx.y > 0) && (threadIdx.y <= BLOCKDIMAM2) && (myy < dims.y) && (myy >= 0);
// Exclude threads at the boundary of the fluxing direction from writing back
if(myy < 0) myy += dims.y; // wrap left edge back to right edge
myy = myy % dims.y; // wrap right edge back to left
int x = myx + dims.x*myy;
int z;
for(z = 0; z < dims.z; z++) {
v = velGrid[x];
b = mag[x];
// first calculate velocityFlow
tile[threadIdx.x][threadIdx.y] = v;
flux[threadIdx.x][threadIdx.y] = b*v;
__syncthreads();
locVelFlow = (tile[threadIdx.x][threadIdx.y] + tile[threadIdx.x][(threadIdx.y+1) % BLOCKDIMA]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
__syncthreads();
// Second step - calculate flux
if(locVelFlow == 1) { tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][(threadIdx.y + 1)%BLOCKDIMA]; } else
{ tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][threadIdx.y]; }
__syncthreads();
// Third step - Perform flux and write to output array
if( IWrite ) {
bW[x] = b - lambda * ( tile[threadIdx.x][threadIdx.y] - tile[threadIdx.x][threadIdx.y-1]);
velFlow[x] = locVelFlow;
}
x += dims.x*dims.y;
__syncthreads();
}
}
__global__ void cukern_magnetWstep_uniformZ(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims)
{
double v, b, locVelFlow;
__shared__ double tile[BLOCKDIMB][BLOCKDIMA];
__shared__ double flux[BLOCKDIMB][BLOCKDIMA];
int myx = blockIdx.x*BLOCKDIMB + threadIdx.x;
int myz = blockIdx.y*BLOCKDIMAM2 + threadIdx.y - 1;
if((myx >= dims.x) || (myz > dims.z)) return; // we keep an extra Y thread for the finite diff.
bool IWrite = (threadIdx.y > 0) && (threadIdx.y <= BLOCKDIMAM2) && (myz < dims.y) && (myz >= 0);
// Exclude threads at the boundary of the fluxing direction from writing back
if(myz < 0) myz += dims.z; // wrap left edge back to right edge
myz = myz % dims.z; // wrap right edge back to left
int x = myx + dims.x*dims.y*myz;
int y;
for(y = 0; y < dims.y; y++) {
v = velGrid[x];
b = mag[x];
// first calculate velocityFlow
tile[threadIdx.x][threadIdx.y] = v;
flux[threadIdx.x][threadIdx.y] = b*v;
__syncthreads();
locVelFlow = (tile[threadIdx.x][threadIdx.y] + tile[threadIdx.x][(threadIdx.y+1) % BLOCKDIMA]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
__syncthreads();
// Second step - calculate flux
if(locVelFlow == 1) { tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][(threadIdx.y + 1)%BLOCKDIMA]; } else
{ tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][threadIdx.y]; }
__syncthreads();
// Third step - Perform flux and write to output array
if( IWrite ) {
bW[x] = b - lambda * ( tile[threadIdx.x][threadIdx.y] - tile[threadIdx.x][threadIdx.y-1]);
velFlow[x] = locVelFlow;
}
x += dims.x;
__syncthreads();
}
}
| 9224d02241d9a855140eb4a3b13cfdd4f77cf1ed.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
This function calculates the first-order accurate upwind advection of the magnetic field
'mag' by velocity 'velGrid' and stores the timestepped value in 'Bw'.
*/
__global__ void cukern_magnetWstep_uniformX(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int nx);
__global__ void cukern_magnetWstep_uniformY(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims);
__global__ void cukern_magnetWstep_uniformZ(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims);
#define BLOCKDIMA 18
#define BLOCKDIMAM2 16
#define BLOCKDIMB 8
#define BLOCKLEN 128
#define BLOCKLENP4 132
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=4) || (nlhs != 2)) mexErrMsgTxt("Wrong number of arguments: need [magW,velFlow] = cudaMagWflux(mag, velgrid, lambda, dir)\n");
CHECK_CUDA_ERROR("entering cudaMagW");
// Get source array info and create destination arrays
MGArray src[2];
int worked = MGA_accessMatlabArrays(prhs, 0, 1, src);
MGArray *dst = MGA_createReturnedArrays(plhs, 2, src);
// Establish launch dimensions & a few other parameters
int fluxDirection = (int)*mxGetPr(prhs[3]);
double lambda = *mxGetPr(prhs[2]);
int3 arraySize;
arraySize.x = src->dim[0];
arraySize.y = src->dim[1];
arraySize.z = src->dim[2];
dim3 blocksize, gridsize;
switch(fluxDirection) {
case 1: // X direction flux. This is "privileged" in that the shift and natural memory load directions align
blocksize.x = BLOCKLEN+4; blocksize.y = blocksize.z = 1;
gridsize.x = arraySize.y;
gridsize.y = arraySize.z;
cukern_magnetWstep_uniformX<<<gridsize , blocksize>>>(src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize.x);
break;
case 2: // Y direction flux: u = y, v = x, w = z
blocksize.x = BLOCKDIMB; blocksize.y = BLOCKDIMAM2;
gridsize.x = arraySize.x / blocksize.x; gridsize.x += 1*(blocksize.x*gridsize.x < arraySize.x);
gridsize.y = arraySize.y / blocksize.y; gridsize.y += 1*(blocksize.y*gridsize.y < arraySize.y);
blocksize.y = BLOCKDIMA;
cukern_magnetWstep_uniformY<<<gridsize , blocksize>>>(src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize);
break;
case 3: // Z direction flux: u = z, v = x, w = y;
blocksize.x = BLOCKDIMB; blocksize.y = BLOCKDIMAM2;
gridsize.x = arraySize.x / blocksize.x; gridsize.x += 1*(blocksize.x*gridsize.x < arraySize.x);
gridsize.y = arraySize.z / blocksize.y; gridsize.y += 1*(blocksize.y*gridsize.y < arraySize.z);
blocksize.y = BLOCKDIMA;
cukern_magnetWstep_uniformZ<<<gridsize , blocksize>>>(src[0].devicePtr[0], src[1].devicePtr[0], dst[0].devicePtr[0], dst[1].devicePtr[0], lambda, arraySize);
break;
}
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, src, fluxDirection, "magnetic W step");
free(dst);
}
__global__ void cukern_magnetWstep_uniformX(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int nx)
{
double v;
double b;
double bv;
double locVelFlow;
__shared__ double flux[BLOCKLENP4];
/* Step 0 - obligatory annoying setup stuff (ASS) */
int I0 = nx * (blockIdx.x + gridDim.x * blockIdx.y);
int Xindex = (threadIdx.x-2);
int Xtrack = Xindex;
Xindex += nx * (threadIdx.x < 2);
int x;
bool doIflux = (threadIdx.x > 1) && (threadIdx.x < BLOCKLEN+2);
while(Xtrack < nx + 2) {
x = I0 + (Xindex % nx) ;
v = velGrid[x];
b = mag[x];
// First step - calculate velocityflow
flux[threadIdx.x] = v;
__syncthreads();
locVelFlow = (flux[threadIdx.x] + flux[(threadIdx.x + 1)%BLOCKLENP4]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
__syncthreads();
// Second step - calculate flux
bv = b * v;
flux[threadIdx.x] = bv;
__syncthreads();
bv = bv * (1 - locVelFlow) + flux[(threadIdx.x + 1)%BLOCKLENP4] * locVelFlow;
__syncthreads();
flux[threadIdx.x] = bv;
__syncthreads();
// Third step - Perform flux and write to output array
if( doIflux && (Xindex < nx) ) {
bW[x] = b - lambda * ( bv - flux[threadIdx.x - 1] );
velFlow[x] = locVelFlow;
}
Xindex += BLOCKLEN;
Xtrack += BLOCKLEN;
__syncthreads();
}
}
__global__ void cukern_magnetWstep_uniformY(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims)
{
double v, b, locVelFlow;
__shared__ double tile[BLOCKDIMB][BLOCKDIMA];
__shared__ double flux[BLOCKDIMB][BLOCKDIMA];
// Dimensions into the array
int myx = blockIdx.x*BLOCKDIMB + threadIdx.x;
int myy = blockIdx.y*BLOCKDIMAM2 + threadIdx.y - 1;
if((myx >= dims.x) || (myy > dims.y)) return; // we keep an extra Y thread for the finite diff.
bool IWrite = (threadIdx.y > 0) && (threadIdx.y <= BLOCKDIMAM2) && (myy < dims.y) && (myy >= 0);
// Exclude threads at the boundary of the fluxing direction from writing back
if(myy < 0) myy += dims.y; // wrap left edge back to right edge
myy = myy % dims.y; // wrap right edge back to left
int x = myx + dims.x*myy;
int z;
for(z = 0; z < dims.z; z++) {
v = velGrid[x];
b = mag[x];
// first calculate velocityFlow
tile[threadIdx.x][threadIdx.y] = v;
flux[threadIdx.x][threadIdx.y] = b*v;
__syncthreads();
locVelFlow = (tile[threadIdx.x][threadIdx.y] + tile[threadIdx.x][(threadIdx.y+1) % BLOCKDIMA]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
__syncthreads();
// Second step - calculate flux
if(locVelFlow == 1) { tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][(threadIdx.y + 1)%BLOCKDIMA]; } else
{ tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][threadIdx.y]; }
__syncthreads();
// Third step - Perform flux and write to output array
if( IWrite ) {
bW[x] = b - lambda * ( tile[threadIdx.x][threadIdx.y] - tile[threadIdx.x][threadIdx.y-1]);
velFlow[x] = locVelFlow;
}
x += dims.x*dims.y;
__syncthreads();
}
}
__global__ void cukern_magnetWstep_uniformZ(double *mag, double *velGrid, double *bW, double *velFlow, double lambda, int3 dims)
{
double v, b, locVelFlow;
__shared__ double tile[BLOCKDIMB][BLOCKDIMA];
__shared__ double flux[BLOCKDIMB][BLOCKDIMA];
int myx = blockIdx.x*BLOCKDIMB + threadIdx.x;
int myz = blockIdx.y*BLOCKDIMAM2 + threadIdx.y - 1;
if((myx >= dims.x) || (myz > dims.z)) return; // we keep an extra Y thread for the finite diff.
bool IWrite = (threadIdx.y > 0) && (threadIdx.y <= BLOCKDIMAM2) && (myz < dims.z) && (myz >= 0);
// Exclude threads at the boundary of the fluxing direction from writing back
if(myz < 0) myz += dims.z; // wrap left edge back to right edge
myz = myz % dims.z; // wrap right edge back to left
int x = myx + dims.x*dims.y*myz;
int y;
for(y = 0; y < dims.y; y++) {
v = velGrid[x];
b = mag[x];
// first calculate velocityFlow
tile[threadIdx.x][threadIdx.y] = v;
flux[threadIdx.x][threadIdx.y] = b*v;
__syncthreads();
locVelFlow = (tile[threadIdx.x][threadIdx.y] + tile[threadIdx.x][(threadIdx.y+1) % BLOCKDIMA]);
if(locVelFlow < 0.0) { locVelFlow = 1.0; } else { locVelFlow = 0.0; }
__syncthreads();
// Second step - calculate flux
if(locVelFlow == 1) { tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][(threadIdx.y + 1)%BLOCKDIMA]; } else
{ tile[threadIdx.x][threadIdx.y] = flux[threadIdx.x][threadIdx.y]; }
__syncthreads();
// Third step - Perform flux and write to output array
if( IWrite ) {
bW[x] = b - lambda * ( tile[threadIdx.x][threadIdx.y] - tile[threadIdx.x][threadIdx.y-1]);
velFlow[x] = locVelFlow;
}
x += dims.x;
__syncthreads();
}
}
|
b718b340f7a9c7087eb4396be78b7b3c8f190e09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
#include "graph_pooling.h"
// input: features(b, c, n), knn_graph(b, n, npoints)
// output: outputs(b, c, n), idxs(b, c, n)
__global__ void graph_max_pooling_kernel(int b, int c, int n, int npoints,
const float *__restrict__ features,
const int *__restrict__ knn_graph,
float *__restrict__ outputs,
int *__restrict__ idxs) {
const int batch_index = blockIdx.x;
features += batch_index * c * n;
knn_graph += batch_index * n * npoints;
outputs += batch_index * c * n;
idxs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
int besti = -1;
float best = -1e6;
for (int k = 0; k < npoints; ++k) {
int id = knn_graph[j * npoints + k];
float f = features[i * n + id];
if(best < f) {
best = f;
besti = id;
}
}
outputs[i * n + j] = best;
idxs[i * n + j] = besti;
}
}
}
// input: grad_outputs(b, c, n), idxs(b, c, n)
// output: grad_inputs(b, c, n)
__global__ void graph_max_pooling_grad_kernel(int b, int c, int n,
const float *__restrict__ grad_outputs,
const int *__restrict__ idxs,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
grad_outputs += batch_index * c * n;
idxs += batch_index * c * n;
grad_inputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
atomicAdd(grad_inputs + i * n + idxs[i * n + j], grad_outputs[i * n + j]);
}
}
}
// input: features(b, c, n), knn_graph(b, n, npoints), weights(b, n, npoints)
// output: outputs(b, c, n)
__global__ void graph_pooling_kernel(int b, int c, int n, int npoints,
const float *__restrict__ features,
const int *__restrict__ knn_graph,
const float *__restrict__ weights,
float *__restrict__ outputs) {
const int batch_index = blockIdx.x;
features += batch_index * c * n;
knn_graph += batch_index * n * npoints;
weights += batch_index * n * npoints;
outputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
float f = 0.;
for (int k = 0; k < npoints; ++k) {
int id = knn_graph[j * npoints + k];
f += features[i * n + id] * weights[j * npoints + k];
}
outputs[i * n + j] = f;
}
}
}
// input: grad_outputs(b, c, n), knn_graph(b, n, npoints), weights(b, n, npoints)
// output: grad_inputs(b, c, n)
__global__ void graph_pooling_grad_kernel(int b, int c, int n, int npoints,
const float *__restrict__ grad_outputs,
const int *__restrict__ knn_graph,
const float *__restrict__ weights,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
grad_outputs += batch_index * c * n;
knn_graph += batch_index * n * npoints;
weights += batch_index * n * npoints;
grad_inputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
for (int k = 0; k < npoints; ++k) {
atomicAdd(grad_inputs + i * n + knn_graph[j * npoints + k],
grad_outputs[i * n + j] * weights[j * npoints + k]);
}
}
}
}
void graph_max_pooling_kernel_wrapper(int b, int c, int n, int npoints,
const float *features, const int *knn_graph,
float *outputs, int *idxs) {
hipError_t err;
hipLaunchKernelGGL(( graph_max_pooling_kernel), dim3(b), dim3(opt_block_config(n, c)), 0, 0, b, c, n, npoints, features, knn_graph, outputs, idxs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void graph_max_pooling_grad_kernel_wrapper(int b, int c, int n,
const float *grad_outputs, const int *idxs,
float *grad_inputs) {
hipError_t err;
hipLaunchKernelGGL(( graph_max_pooling_grad_kernel), dim3(b), dim3(opt_block_config(n, c)), 0, 0, b, c, n, grad_outputs, idxs, grad_inputs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void graph_pooling_kernel_wrapper(int b, int c, int n, int npoints,
const float *features, const int *knn_graph, const float *weights,
float *outputs) {
hipError_t err;
hipLaunchKernelGGL(( graph_pooling_kernel), dim3(b), dim3(opt_block_config(n, c)), 0, 0, b, c, n, npoints, features, knn_graph, weights, outputs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void graph_pooling_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_outputs, const int *knn_graph, const float *weights,
float *grad_inputs) {
hipError_t err;
hipLaunchKernelGGL(( graph_pooling_grad_kernel), dim3(b), dim3(opt_block_config(n, c)), 0, 0, b, c, n, npoints, grad_outputs, knn_graph, weights, grad_inputs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| b718b340f7a9c7087eb4396be78b7b3c8f190e09.cu | #include "cuda_utils.h"
#include "graph_pooling.h"
// input: features(b, c, n), knn_graph(b, n, npoints)
// output: outputs(b, c, n), idxs(b, c, n)
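// Semantics (illustrative sketch): for every batch b, channel c and point j,
//   outputs[b][c][j] = max over k of features[b][c][ knn_graph[b][j][k] ],
// and idxs records the argmax point index so the backward kernel below can
// route gradients back via atomicAdd.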
__global__ void graph_max_pooling_kernel(int b, int c, int n, int npoints,
const float *__restrict__ features,
const int *__restrict__ knn_graph,
float *__restrict__ outputs,
int *__restrict__ idxs) {
const int batch_index = blockIdx.x;
features += batch_index * c * n;
knn_graph += batch_index * n * npoints;
outputs += batch_index * c * n;
idxs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
int besti = -1;
float best = -1e6;
for (int k = 0; k < npoints; ++k) {
int id = knn_graph[j * npoints + k];
float f = features[i * n + id];
if(best < f) {
best = f;
besti = id;
}
}
outputs[i * n + j] = best;
idxs[i * n + j] = besti;
}
}
}
// input: grad_outputs(b, c, n), idxs(b, c, n)
// output: grad_inputs(b, c, n)
__global__ void graph_max_pooling_grad_kernel(int b, int c, int n,
const float *__restrict__ grad_outputs,
const int *__restrict__ idxs,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
grad_outputs += batch_index * c * n;
idxs += batch_index * c * n;
grad_inputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
atomicAdd(grad_inputs + i * n + idxs[i * n + j], grad_outputs[i * n + j]);
}
}
}
// input: features(b, c, n), knn_graph(b, n, npoints), weights(b, n, npoints)
// output: outputs(b, c, n)
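// Semantics (illustrative sketch): for every batch b, channel c and point j,
//   outputs[b][c][j] = sum over k of weights[b][j][k] * features[b][c][ knn_graph[b][j][k] ].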
__global__ void graph_pooling_kernel(int b, int c, int n, int npoints,
const float *__restrict__ features,
const int *__restrict__ knn_graph,
const float *__restrict__ weights,
float *__restrict__ outputs) {
const int batch_index = blockIdx.x;
features += batch_index * c * n;
knn_graph += batch_index * n * npoints;
weights += batch_index * n * npoints;
outputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
float f = 0.;
for (int k = 0; k < npoints; ++k) {
int id = knn_graph[j * npoints + k];
f += features[i * n + id] * weights[j * npoints + k];
}
outputs[i * n + j] = f;
}
}
}
// input: grad_outputs(b, c, n), knn_graph(b, n, npoints), weights(b, n, npoints)
// output: grad_inputs(b, c, n)
__global__ void graph_pooling_grad_kernel(int b, int c, int n, int npoints,
const float *__restrict__ grad_outputs,
const int *__restrict__ knn_graph,
const float *__restrict__ weights,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
grad_outputs += batch_index * c * n;
knn_graph += batch_index * n * npoints;
weights += batch_index * n * npoints;
grad_inputs += batch_index * c * n;
for (int i = threadIdx.y; i < c; i += blockDim.y) {
for (int j = threadIdx.x; j < n; j += blockDim.x) {
for (int k = 0; k < npoints; ++k) {
atomicAdd(grad_inputs + i * n + knn_graph[j * npoints + k],
grad_outputs[i * n + j] * weights[j * npoints + k]);
}
}
}
}
void graph_max_pooling_kernel_wrapper(int b, int c, int n, int npoints,
const float *features, const int *knn_graph,
float *outputs, int *idxs) {
cudaError_t err;
graph_max_pooling_kernel<<<b, opt_block_config(n, c)>>>(b, c, n, npoints, features, knn_graph, outputs, idxs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void graph_max_pooling_grad_kernel_wrapper(int b, int c, int n,
const float *grad_outputs, const int *idxs,
float *grad_inputs) {
cudaError_t err;
graph_max_pooling_grad_kernel<<<b, opt_block_config(n, c)>>>(b, c, n, grad_outputs, idxs, grad_inputs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void graph_pooling_kernel_wrapper(int b, int c, int n, int npoints,
const float *features, const int *knn_graph, const float *weights,
float *outputs) {
cudaError_t err;
graph_pooling_kernel<<<b, opt_block_config(n, c)>>>(b, c, n, npoints, features, knn_graph, weights, outputs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void graph_pooling_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_outputs, const int *knn_graph, const float *weights,
float *grad_inputs) {
cudaError_t err;
graph_pooling_grad_kernel<<<b, opt_block_config(n, c)>>>(b, c, n, npoints, grad_outputs, knn_graph, weights, grad_inputs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
f37093853d3d114b8a53192dd9ee5037f9dd937e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Stava and Benes
#define TILE_SIZE 16
#define TILE_GRID_SIZE 4
#define THREAD_PER_TILE 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int* s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
__device__ void Union(int* s_buf, unsigned index_a, unsigned index_b, char* changed) {
unsigned a = s_buf[index_a];
if (!a) return;
unsigned b = s_buf[index_b];
if (!b) return;
--a;
--b;
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a != b) {
*changed = 1;
}
if (a < b) {
atomicMin(s_buf + b, a + 1);
}
else if (b < a) {
atomicMin(s_buf + a, b + 1);
}
}
// Perform local CCL on 16x16 tiles
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
const unsigned r = threadIdx.y;
const unsigned c = threadIdx.x;
const unsigned local_index = r * blockDim.x + c;
const unsigned global_row = blockIdx.y * blockDim.y + r;
const unsigned global_col = blockIdx.x * blockDim.x + c;
const unsigned img_index = global_row * img.step + global_col;
__shared__ int s_buf[TILE_SIZE * TILE_SIZE];
__shared__ unsigned char s_img[TILE_SIZE * TILE_SIZE];
__shared__ char changed[1];
bool in_limits = (global_row < img.rows && global_col < img.cols);
s_img[local_index] = in_limits ? img[img_index] : 0;
unsigned char v = s_img[local_index];
int label = v ? local_index + 1 : 0;
__syncthreads();
while (1) {
// Pass 1 of the CCL algorithm
s_buf[local_index] = label;
if (threadIdx.x == 0 && threadIdx.y == 0) {
changed[0] = 0;
}
int new_label = label;
__syncthreads();
// Find the minimal label from the neighboring elements
if (label) {
if (r > 0 && c > 0 && s_img[local_index - TILE_SIZE - 1]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE - 1]);
}
if (r > 0 && s_img[local_index - TILE_SIZE]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE]);
}
if (r > 0 && c < TILE_SIZE - 1 && s_img[local_index - TILE_SIZE + 1]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE + 1]);
}
if (c > 0 && s_img[local_index - 1]) {
new_label = min(new_label, s_buf[local_index - 1]);
}
if (c < TILE_SIZE - 1 && s_img[local_index + 1]) {
new_label = min(new_label, s_buf[local_index + 1]);
}
if (r < TILE_SIZE - 1 && c > 0 && s_img[local_index + TILE_SIZE - 1]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE - 1]);
}
if (r < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE]);
}
if (r < TILE_SIZE - 1 && c < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE + 1]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE + 1]);
}
}
__syncthreads();
// If the new label is smaller than the old one merge the equivalence trees
if (new_label < label) {
atomicMin(s_buf + label - 1, new_label);
changed[0] = 1;
}
__syncthreads();
if (changed[0] == 0)
break;
if (label) {
// Pass 2 of the CCL algorithm
label = Find(s_buf, label - 1) + 1;
}
__syncthreads();
}
if (in_limits) {
// Store the result to the device memory
int global_label = 0;
if (v) {
unsigned f_row = (label - 1) / TILE_SIZE;
unsigned f_col = (label - 1) % TILE_SIZE;
global_label = (blockIdx.y * TILE_SIZE + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * TILE_SIZE + f_col) + 1;
}
labels.data[global_row * labels.step / sizeof(int) + global_col] = global_label;
}
}
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, uint32_t subBlockDim) {
// Coordinates of the top-left pixel of the current block of tiles
unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;
// Coordinates of the top-left pixel of the current tile
unsigned tile_row = block_row + threadIdx.y * subBlockDim;
unsigned tile_col = block_col + threadIdx.x * subBlockDim;
unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;
__shared__ char changed[1];
while (1) {
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
changed[0] = 0;
}
__syncthreads();
// Process the bottom horizontal border
for (unsigned i = 0; i < repetitions; i++) {
unsigned r = tile_row + subBlockDim - 1;
unsigned c = tile_col + i * blockDim.z + threadIdx.z;
if (threadIdx.y < blockDim.y - 1 && r < img.rows - 1 && c < img.cols && c < tile_col + subBlockDim) {
if (c > block_col) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c - 1, changed);
}
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c, changed);
if (c < img.cols - 1 && c < block_col + blockDim.x * subBlockDim - 1) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
}
}
}
// Process the right vertical border
for (unsigned i = 0; i < repetitions; i++) {
unsigned c = tile_col + subBlockDim - 1;
unsigned r = tile_row + i * blockDim.z + threadIdx.z;
if (threadIdx.x < blockDim.x - 1 && c < img.cols - 1 && r < img.rows && r < tile_row + subBlockDim) {
if (r > block_row) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r - 1) * labels.step / sizeof(int) + c + 1, changed);
}
Union(labels.data, r * labels.step / sizeof(int) + c, r * labels.step / sizeof(int) + c + 1, changed);
if (r < img.rows - 1 && r < block_row + blockDim.y * subBlockDim - 1) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
}
}
}
__syncthreads();
if (changed[0] == 0) {
break; // the tiles are merged
}
__syncthreads();
}
}
__global__ void BorderCompression(cuda::PtrStepSzi labels, uint32_t subBlockDim) {
// Coordinates of the top-left pixel of the current block of tiles
const unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
const unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;
// Coordinates of the top-left pixel of the current tile
const unsigned tile_row = block_row + threadIdx.y * subBlockDim;
const unsigned tile_col = block_col + threadIdx.x * subBlockDim;
const unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;
// Process the bottom horizontal border
for (unsigned i = 0; i < repetitions; i++) {
const unsigned r = tile_row + subBlockDim - 1;
const unsigned c = tile_col + i * blockDim.z + threadIdx.z;
if (threadIdx.y < blockDim.y - 1 && r < labels.rows - 1 && c < labels.cols && c < tile_col + subBlockDim) {
int label = labels[r * labels.step / sizeof(int) + c];
if (label) {
labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
}
}
}
// Process the right vertical border
for (unsigned i = 0; i < repetitions; i++) {
const unsigned c = tile_col + subBlockDim - 1;
const unsigned r = tile_row + i * blockDim.z + threadIdx.z;
if (threadIdx.x < blockDim.x - 1 && c < labels.cols - 1 && r < labels.rows && r < tile_row + subBlockDim) {
int label = labels[r * labels.step / sizeof(int) + c];
if (label) {
labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
}
}
}
}
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned global_col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < labels.rows && global_col < labels.cols) {
unsigned char val = img[global_row * img.step + global_col];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class STAVA : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
STAVA() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);
// Phase 1
// Label pixels locally to a tile
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Phase 1 output
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Merges together Union-Find trees from different tiles, in a recursive manner
uint32_t max_img_dim = max(img_.rows, img_.cols);
uint32_t sub_block_dim = TILE_SIZE;
uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
dim3 grid_size_merge;
dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);
while (sub_block_dim < max_img_dim) {
grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
sub_block_dim = block_pixels;
block_pixels *= TILE_GRID_SIZE;
// Phase 2 output
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
}
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
hipDeviceSynchronize();
double t = perf_.stop();
perf_.start();
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
hipDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
void GlobalScan() {
uint32_t max_img_dim = max(img_.rows, img_.cols);
uint32_t sub_block_dim = TILE_SIZE;
uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
dim3 grid_size_merge;
dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);
while (sub_block_dim < max_img_dim) {
grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
sub_block_dim = block_pixels;
block_pixels *= TILE_GRID_SIZE;
}
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(STAVA);
| f37093853d3d114b8a53192dd9ee5037f9dd937e.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Stava and Benes
#define TILE_SIZE 16
#define TILE_GRID_SIZE 4
#define THREAD_PER_TILE 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
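// Labels here are 1-based: 0 marks a background element, and a stored value of
// (index + 1) points at the parent element 'index'; a root is an element whose
// stored label equals its own index + 1.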
__device__ unsigned Find(const int* s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
__device__ void Union(int* s_buf, unsigned index_a, unsigned index_b, char* changed) {
unsigned a = s_buf[index_a];
if (!a) return;
unsigned b = s_buf[index_b];
if (!b) return;
--a;
--b;
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a != b) {
*changed = 1;
}
if (a < b) {
atomicMin(s_buf + b, a + 1);
}
else if (b < a) {
atomicMin(s_buf + a, b + 1);
}
}
// Perform local CCL on 16x16 tiles
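// Sketch of the per-tile iteration below: every foreground pixel starts with
// label (local_index + 1); each pass takes the minimum label over its
// 8-connected in-tile neighbours, merges the corresponding union-find trees
// via atomicMin, flattens with Find, and repeats until no label changes.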
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
const unsigned r = threadIdx.y;
const unsigned c = threadIdx.x;
const unsigned local_index = r * blockDim.x + c;
const unsigned global_row = blockIdx.y * blockDim.y + r;
const unsigned global_col = blockIdx.x * blockDim.x + c;
const unsigned img_index = global_row * img.step + global_col;
__shared__ int s_buf[TILE_SIZE * TILE_SIZE];
__shared__ unsigned char s_img[TILE_SIZE * TILE_SIZE];
__shared__ char changed[1];
bool in_limits = (global_row < img.rows && global_col < img.cols);
s_img[local_index] = in_limits ? img[img_index] : 0;
unsigned char v = s_img[local_index];
int label = v ? local_index + 1 : 0;
__syncthreads();
while (1) {
// Pass 1 of the CCL algorithm
s_buf[local_index] = label;
if (threadIdx.x == 0 && threadIdx.y == 0) {
changed[0] = 0;
}
int new_label = label;
__syncthreads();
// Find the minimal label from the neighboring elements
if (label) {
if (r > 0 && c > 0 && s_img[local_index - TILE_SIZE - 1]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE - 1]);
}
if (r > 0 && s_img[local_index - TILE_SIZE]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE]);
}
if (r > 0 && c < TILE_SIZE - 1 && s_img[local_index - TILE_SIZE + 1]) {
new_label = min(new_label, s_buf[local_index - TILE_SIZE + 1]);
}
if (c > 0 && s_img[local_index - 1]) {
new_label = min(new_label, s_buf[local_index - 1]);
}
if (c < TILE_SIZE - 1 && s_img[local_index + 1]) {
new_label = min(new_label, s_buf[local_index + 1]);
}
if (r < TILE_SIZE - 1 && c > 0 && s_img[local_index + TILE_SIZE - 1]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE - 1]);
}
if (r < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE]);
}
if (r < TILE_SIZE - 1 && c < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE + 1]) {
new_label = min(new_label, s_buf[local_index + TILE_SIZE + 1]);
}
}
__syncthreads();
// If the new label is smaller than the old one merge the equivalence trees
if (new_label < label) {
atomicMin(s_buf + label - 1, new_label);
changed[0] = 1;
}
__syncthreads();
if (changed[0] == 0)
break;
if (label) {
// Pass 2 of the CCL algorithm
label = Find(s_buf, label - 1) + 1;
}
__syncthreads();
}
if (in_limits) {
// Store the result to the device memory
int global_label = 0;
if (v) {
unsigned f_row = (label - 1) / TILE_SIZE;
unsigned f_col = (label - 1) % TILE_SIZE;
global_label = (blockIdx.y * TILE_SIZE + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * TILE_SIZE + f_col) + 1;
}
labels.data[global_row * labels.step / sizeof(int) + global_col] = global_label;
}
}
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, uint32_t subBlockDim) {
// Coordinates of the top-left pixel of the current block of tiles
unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;
// Coordinates of the top-left pixel of the current tile
unsigned tile_row = block_row + threadIdx.y * subBlockDim;
unsigned tile_col = block_col + threadIdx.x * subBlockDim;
unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;
__shared__ char changed[1];
while (1) {
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
changed[0] = 0;
}
__syncthreads();
// Process the bottom horizontal border
for (unsigned i = 0; i < repetitions; i++) {
unsigned r = tile_row + subBlockDim - 1;
unsigned c = tile_col + i * blockDim.z + threadIdx.z;
if (threadIdx.y < blockDim.y - 1 && r < img.rows - 1 && c < img.cols && c < tile_col + subBlockDim) {
if (c > block_col) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c - 1, changed);
}
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c, changed);
if (c < img.cols - 1 && c < block_col + blockDim.x * subBlockDim - 1) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
}
}
}
// Process the right vertical border
for (unsigned i = 0; i < repetitions; i++) {
unsigned c = tile_col + subBlockDim - 1;
unsigned r = tile_row + i * blockDim.z + threadIdx.z;
if (threadIdx.x < blockDim.x - 1 && c < img.cols - 1 && r < img.rows && r < tile_row + subBlockDim) {
if (r > block_row) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r - 1) * labels.step / sizeof(int) + c + 1, changed);
}
Union(labels.data, r * labels.step / sizeof(int) + c, r * labels.step / sizeof(int) + c + 1, changed);
if (r < img.rows - 1 && r < block_row + blockDim.y * subBlockDim - 1) {
Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
}
}
}
__syncthreads();
if (changed[0] == 0) {
break; // the tiles are merged
}
__syncthreads();
}
}
__global__ void BorderCompression(cuda::PtrStepSzi labels, uint32_t subBlockDim) {
// Coordinates of the top-left pixel of the current block of tiles
const unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
const unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;
// Coordinates of the top-left pixel of the current tile
const unsigned tile_row = block_row + threadIdx.y * subBlockDim;
const unsigned tile_col = block_col + threadIdx.x * subBlockDim;
const unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;
// Process the bottom horizontal border
for (unsigned i = 0; i < repetitions; i++) {
const unsigned r = tile_row + subBlockDim - 1;
const unsigned c = tile_col + i * blockDim.z + threadIdx.z;
if (threadIdx.y < blockDim.y - 1 && r < labels.rows - 1 && c < labels.cols && c < tile_col + subBlockDim) {
int label = labels[r * labels.step / sizeof(int) + c];
if (label) {
labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
}
}
}
// Process the right vertical border
for (unsigned i = 0; i < repetitions; i++) {
const unsigned c = tile_col + subBlockDim - 1;
const unsigned r = tile_row + i * blockDim.z + threadIdx.z;
if (threadIdx.x < blockDim.x - 1 && c < labels.cols - 1 && r < labels.rows && r < tile_row + subBlockDim) {
int label = labels[r * labels.step / sizeof(int) + c];
if (label) {
labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
}
}
}
}
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned global_col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < labels.rows && global_col < labels.cols) {
unsigned char val = img[global_row * img.step + global_col];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class STAVA : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
STAVA() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);
// Phase 1
// Label pixels locally to a tile
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Phase 1 output
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Merges together Union-Find trees from different tiles, in a recursive manner
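// Illustrative progression with the defaults above (TILE_SIZE 16, 4x4 tile grid):
// sub_block_dim grows 16 -> 64 -> 256 -> ... per pass, i.e. every GlobalMerge
// stitches a 4x4 group of already-merged blocks, until a single block covers the
// larger image dimension.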
uint32_t max_img_dim = max(img_.rows, img_.cols);
uint32_t sub_block_dim = TILE_SIZE;
uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
dim3 grid_size_merge;
dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);
while (sub_block_dim < max_img_dim) {
grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
sub_block_dim = block_pixels;
block_pixels *= TILE_GRID_SIZE;
// Phase 2 output
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
}
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
cudaDeviceSynchronize();
double t = perf_.stop();
perf_.start();
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
cudaDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
uint32_t max_img_dim = max(img_.rows, img_.cols);
uint32_t sub_block_dim = TILE_SIZE;
uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
dim3 grid_size_merge;
dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);
while (sub_block_dim < max_img_dim) {
grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
sub_block_dim = block_pixels;
block_pixels *= TILE_GRID_SIZE;
}
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(STAVA);
|
b64431a8825e278fcaee74b8d1460b699c251c40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
float weight = weights ? weights[i] : 1.0;
float total_prob = 0.0;
Ydata[i] = 0.0;
for (int j = 0; j < D; j++) {
int idx = i * D + j;
total_prob += labeldata[idx];
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
Ydata[i] += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
CUDA_KERNEL_ASSERT(abs(total_prob - 1.0) < 1e-5f);
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(i, N) {
for (int j = 0; j < D; j++) {
int idx = i * D + j;
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
}
} else {
CUDA_1D_KERNEL_LOOP(i, N) {
float weight = weights[i];
for (int d = 0; d < D; d++) {
int idx = i * D + d;
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weight;
}
}
}
}
#define REDUCTION_KERNEL_THREADS_X 128
#define REDUCTION_KERNEL_THREADS_Y 4
#define REDUCTION_THREADS \
(REDUCTION_KERNEL_THREADS_X * REDUCTION_KERNEL_THREADS_Y)
__global__ void
RowMaxKernelLargeD(const int num, const int D, const float* data, float* out) {
__shared__ float
max_buffer[REDUCTION_KERNEL_THREADS_Y * REDUCTION_KERNEL_THREADS_X];
const int threadId = threadIdx.y * REDUCTION_KERNEL_THREADS_X + threadIdx.x;
for (int index = blockIdx.y * blockDim.y + threadIdx.y; index < num;
index += blockDim.y * gridDim.y) {
float maxval = -FLT_MAX;
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < D;
x += blockDim.x * gridDim.x) {
maxval = fmaxf(data[index * D + x], maxval);
}
max_buffer[threadId] = maxval;
__syncthreads();
if (threadIdx.x < 32) {
maxval = fmaxf(
fmaxf(
fmaxf(maxval, max_buffer[threadId + 32]),
max_buffer[threadId + 64]),
max_buffer[threadId + 96]);
max_buffer[threadId] = maxval;
}
__syncthreads();
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 1; j < 32; j++) {
maxval = max(max_buffer[threadId + j], maxval);
}
out[index] = maxval;
}
__syncthreads();
}
}
__global__ void RowMaxKernel(const int num, const int D, const float* data,
float* out) {
CUDA_1D_KERNEL_LOOP(index, num) {
float maxval = -FLT_MAX;
for (int d = 0; d < D; ++d) {
maxval = max(data[index * D + d], maxval);
}
out[index] = maxval;
}
}
__global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H,
const float* Xdata, float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
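// i.e. P_c = exp(x_c - m) / sum_d exp(x_d - m) with m = max_d x_d; the shift
// by m leaves the result unchanged but keeps exp() from overflowing.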
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H,
const float* Pdata, const int* label_data, const float *weights,
float* loss_data, float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore-label, so set all gradients for these positions
// to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
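// Host-side helper (row-wise over an N x D matrix): with log_softmax == false it
// writes p_nd = exp(x_nd - rowmax_n) / scale_n, where scale_n is the row sum of
// the shifted exponentials; with log_softmax == true it writes
// log p_nd = x_nd - rowmax_n - log(scale_n). rowmax and scales are caller-provided
// scratch buffers of length N.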
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
if (D > 512) {
dim3 threadsPerBlock(
REDUCTION_KERNEL_THREADS_X, REDUCTION_KERNEL_THREADS_Y);
dim3 numBlocks(1, max(1, N / 32));
hipLaunchKernelGGL(( RowMaxKernelLargeD),
dim3(numBlocks),
dim3(threadsPerBlock),
0,
context->cuda_stream(), N, D, logits, rowmax);
} else {
hipLaunchKernelGGL(( RowMaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, logits, rowmax);
}
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, probs, scales, probs);
} else {
hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N = X.dim32(0);
int D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
DCHECK(!(spatial_mode_ && label_prob_mode_)); // Do not currently support both
if (!spatial_mode_) {
DCHECK_EQ(X.ndim(), 2);
if (!label_prob_mode_) {
DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1));
} else {
DCHECK(T.ndim() == 2 && T.dim32(0) == N && T.dim32(1) == D);
}
DCHECK_EQ(T.dim32(0), N);
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
hipLaunchKernelGGL(( ProbCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(N, weights,
total_weight_ptr_.mutable_data<float>(), &context_);
hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(),
sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream());
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
} else {
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( SpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, D, W, H, P->data<float>(), label_data, weights,
losses_.mutable_data<float>(), weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(), weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(), &context_);
hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(),
sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream());
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight,
avg_loss_data, avg_loss_data, &context_);
}
}
return true;
}
template<>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
int N = X.dim32(0);
int D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (!spatial_mode_) {
DCHECK_EQ(X.ndim(), 2);
DCHECK(
(T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1) ||
(T.ndim() == 2 && T.dim32(0) == N && T.dim32(1) == D));
DCHECK_EQ(T.dim32(0), N);
// Subtract 1 from labeled positions
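// For hard labels, d(-log p_y)/dx_j = p_j - 1{j == y} (times the per-example
// weight when weights are given), so starting from the softmax probabilities
// only the entry of the labeled class needs the extra -1.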
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_);
hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(),
sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream());
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(), d_avg_loss.data<float>(), dX->data<float>(),
dX->mutable_data<float>(), &context_);
} else {
// Spatial mode, compute softmax for each x, y location
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. For every class except the one
// matching the correct label, the gradient is simply the softmax
// probability p_j; the labeled entry is adjusted by -1 in the kernel below.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, D, W, H, label_data, weights, dX_data,
weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(), weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(), &context_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(),
sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream());
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
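// With y = softmax(x), the backward rule is dx_i = y_i * (dy_i - sum_j dy_j * y_j);
// the block-wide reduction below computes the inner product sum_j dy_j * y_j once
// per row, and each thread then applies the elementwise update.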
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
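// Note: softmax_gradient_kernel evaluates the standard softmax
// Jacobian-vector product
//   dX_i = Y_i * (dY_i - sum_j Y_j * dY_j)
// computing the row-wise dot product once in shared memory and broadcasting
// it to every thread of the block.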
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
hipLaunchKernelGGL(( softmax_gradient_kernel),
dim3(N),
dim3(SOFTMAX_NUM_THREADS),
0,
context_.cuda_stream(),
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
| b64431a8825e278fcaee74b8d1460b699c251c40.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
float weight = weights ? weights[i] : 1.0;
float total_prob = 0.0;
Ydata[i] = 0.0;
for (int j = 0; j < D; j++) {
int idx = i * D + j;
total_prob += labeldata[idx];
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
Ydata[i] += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
CUDA_KERNEL_ASSERT(abs(total_prob - 1.0) < 1e-5f);
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(i, N) {
for (int j = 0; j < D; j++) {
int idx = i * D + j;
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
}
} else {
CUDA_1D_KERNEL_LOOP(i, N) {
float weight = weights[i];
for (int d = 0; d < D; d++) {
int idx = i * D + d;
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weight;
}
}
}
}
#define REDUCTION_KERNEL_THREADS_X 128
#define REDUCTION_KERNEL_THREADS_Y 4
#define REDUCTION_THREADS \
(REDUCTION_KERNEL_THREADS_X * REDUCTION_KERNEL_THREADS_Y)
__global__ void
RowMaxKernelLargeD(const int num, const int D, const float* data, float* out) {
__shared__ float
max_buffer[REDUCTION_KERNEL_THREADS_Y * REDUCTION_KERNEL_THREADS_X];
const int threadId = threadIdx.y * REDUCTION_KERNEL_THREADS_X + threadIdx.x;
for (int index = blockIdx.y * blockDim.y + threadIdx.y; index < num;
index += blockDim.y * gridDim.y) {
float maxval = -FLT_MAX;
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < D;
x += blockDim.x * gridDim.x) {
maxval = fmaxf(data[index * D + x], maxval);
}
max_buffer[threadId] = maxval;
__syncthreads();
if (threadIdx.x < 32) {
maxval = fmaxf(
fmaxf(
fmaxf(maxval, max_buffer[threadId + 32]),
max_buffer[threadId + 64]),
max_buffer[threadId + 96]);
max_buffer[threadId] = maxval;
}
__syncthreads();
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 1; j < 32; j++) {
maxval = max(max_buffer[threadId + j], maxval);
}
out[index] = maxval;
}
__syncthreads();
}
}
__global__ void RowMaxKernel(const int num, const int D, const float* data,
float* out) {
CUDA_1D_KERNEL_LOOP(index, num) {
float maxval = -FLT_MAX;
for (int d = 0; d < D; ++d) {
maxval = max(data[index * D + d], maxval);
}
out[index] = maxval;
}
}
__global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H,
const float* Xdata, float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H,
const float* Pdata, const int* label_data, const float *weights,
float* loss_data, float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore-label, so set all gradients for these positions
// to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
if (D > 512) {
dim3 threadsPerBlock(
REDUCTION_KERNEL_THREADS_X, REDUCTION_KERNEL_THREADS_Y);
dim3 numBlocks(1, max(1, N / 32));
RowMaxKernelLargeD<<<
numBlocks,
threadsPerBlock,
0,
context->cuda_stream()>>>(N, D, logits, rowmax);
} else {
RowMaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, logits, rowmax);
}
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
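// Summary of the steps above: the helper produces the numerically stable
// softmax
//   m_i  = max_j X_ij
//   P_ij = exp(X_ij - m_i) / sum_k exp(X_ik - m_i)
// and, when log_softmax is set, writes
//   log P_ij = X_ij - m_i - log(sum_k exp(X_ik - m_i))
// instead; SoftmaxWithLossOp requests the logarithmic form for the hard-label
// path and exponentiates afterwards.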
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N = X.dim32(0);
int D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
DCHECK(!(spatial_mode_ && label_prob_mode_)); // Do not currently support both
if (!spatial_mode_) {
DCHECK_EQ(X.ndim(), 2);
if (!label_prob_mode_) {
DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1));
} else {
DCHECK(T.ndim() == 2 && T.dim32(0) == N && T.dim32(1) == D);
}
DCHECK_EQ(T.dim32(0), N);
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(N, weights,
total_weight_ptr_.mutable_data<float>(), &context_);
cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(),
sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream());
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
} else {
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, D, W, H, P->data<float>(), label_data, weights,
losses_.mutable_data<float>(), weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(), weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(), &context_);
cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(),
sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream());
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight,
avg_loss_data, avg_loss_data, &context_);
}
}
return true;
}
template<>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
int N = X.dim32(0);
int D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (!spatial_mode_) {
DCHECK_EQ(X.ndim(), 2);
DCHECK(
(T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1) ||
(T.ndim() == 2 && T.dim32(0) == N && T.dim32(1) == D));
DCHECK_EQ(T.dim32(0), N);
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_);
cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(),
sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream());
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(), d_avg_loss.data<float>(), dX->data<float>(),
dX->mutable_data<float>(), &context_);
} else {
// Spatial mode, compute softmax for each x, y location
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. All neurons except the one
// corresponding to the correct label have gradient equal to e(x_j),
// i.e. the probability under softmax.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data,
weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(), weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(), &context_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(),
sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream());
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
dc5ad518cc45f61b86908a8fb8a3db0b82dbaec4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
__device__ inline uint64_t llitoulli(int64_t l)
{
uint64_t u;
asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l));
return u;
}
__device__ inline int64_t ullitolli(uint64_t u)
{
int64_t l;
asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u));
return l;
}
__device__ inline float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do
{
assumed = old;
old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed))));
}
while (assumed != old);
return __int_as_float(old);
}
void SetKActivationGpuData()
{
hipError_t status;
status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "hipMemcpyToSymbol: SetKActivationGpuData copy to cData failed");
}
void GetKActivationGpuData()
{
hipError_t status;
status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "hipMemcpyFromSymbol: GetKActivationGpuData copy From cData failed");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat a = 1.0f / (1.0f + exp(-pData[pos]));
pData[pos] = a;
}
}
void kCalculateSigmoidActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateSigmoidActivation_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pData, size);
LAUNCHERROR("kCalculateSigmoidActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
pData[pos] = tanh(pData[pos]);
}
void kCalculateTanhActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateTanhActivation_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pData, size);
LAUNCHERROR("kCalculateTanhActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateReluActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
pData[pos] = max(0.0f, pData[pos]);
}
void kCalculateReluActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateReluActivation_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pData, size);
LAUNCHERROR("kCalculateReluActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateLeakyReluActivation_kernel(NNFloat* pData, uint64_t size, NNFloat slope)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat val = pData[pos];
pData[pos] = max(val, val * slope);
}
}
void kCalculateLeakyReluActivation(NNFloat* pData, uint64_t size, NNFloat slope)
{
uint32_t blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateLeakyReluActivation_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pData, size, slope);
LAUNCHERROR("kCalculateLeakyReluActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxActivation_kernel(NNFloat* pData, uint32_t stride)
{
__shared__ unsigned long long int sAccumulator;
__shared__ NNFloat sMaxValue;
if (threadIdx.x == 0)
{
sAccumulator = 0;
sMaxValue = (NNFloat)-99999999.0f;
}
__syncthreads();
// Move data pointer to proper row, calculate activations, and sum them up as well as find the maximum output
pData += blockIdx.x * stride;
uint32_t pos = threadIdx.x;
NNFloat maxValue = (NNFloat)-9999999999.0;
// Calculate max value to improve numerical stability (Theano does this so I'll assume it's a good idea)
while (pos < stride)
{
NNFloat z = pData[pos];
maxValue = max(z, maxValue);
pos += blockDim.x;
}
// Reduce maxValue within and between warps
uint32_t tgx = threadIdx.x & cData._warpMask;
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 1));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 2));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 4));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 8));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 16));
// Convert to 64-bit int to work around GPU instruction set deficiency
if (tgx == 0)
atomicMax(&sMaxValue, maxValue);
__syncthreads();
maxValue = sMaxValue;
// Calculate sum
pos = threadIdx.x;
NNFloat sum = (NNFloat)0.0;
while (pos < stride)
{
NNFloat z = pData[pos];
sum += exp(z - maxValue);
pos += blockDim.x;
}
// Reduce sums within and between warps
sum += __shfl(sum, tgx ^ 1);
sum += __shfl(sum, tgx ^ 2);
sum += __shfl(sum, tgx ^ 4);
sum += __shfl(sum, tgx ^ 8);
sum += __shfl(sum, tgx ^ 16);
unsigned long long int lsum = llitoulli(llrintf(ERRORSCALEF * sum));
if (tgx == 0)
atomicAdd(&sAccumulator, lsum);
__syncthreads();
NNFloat norm = (NNFloat)1.0 / (NNFloat)((double)sAccumulator * ONEOVERERRORSCALE);
// Normalize output by dividing by sum of activations
pos = threadIdx.x;
while (pos < stride)
{
NNFloat z = pData[pos];
NNFloat a = exp(z - maxValue);
pData[pos] = min((NNFloat)1.0, a * norm);
pos += blockDim.x;
}
}
void kCalculateSoftMaxActivation(NNFloat* pData, uint32_t batch, uint32_t stride)
{
uint32_t warps = getGpu()._threadsPerBlock / getGpu()._warpSize;
hipLaunchKernelGGL(( kCalculateSoftMaxActivation_kernel), dim3(batch), dim3(getGpu()._threadsPerBlock), 0, 0, pData, stride);
LAUNCHERROR("kCalculateSoftMaxActivation_kernel");
}
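// Illustrative call site (a sketch, not taken from the DSSTNE sources; the
// buffer name is hypothetical and error checks are omitted). All launchers
// above operate in place on device memory:
//
// NNFloat* d_units = nullptr;
// hipMalloc(&d_units, (size_t)batch * stride * sizeof(NNFloat));
// // ... fill d_units with pre-activation values ...
// kCalculateSigmoidActivation(d_units, (uint64_t)batch * stride);
// kCalculateSoftMaxActivation(d_units, batch, stride); // one block per row of `stride` outputs
// hipFree(d_units);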
| dc5ad518cc45f61b86908a8fb8a3db0b82dbaec4.cu | /*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
__device__ inline uint64_t llitoulli(int64_t l)
{
uint64_t u;
asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l));
return u;
}
__device__ inline int64_t ullitolli(uint64_t u)
{
int64_t l;
asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u));
return l;
}
__device__ inline float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do
{
assumed = old;
old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed))));
}
while (assumed != old);
return __int_as_float(old);
}
void SetKActivationGpuData()
{
cudaError_t status;
status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "cudaMemcpyToSymbol: SetKActivationGpuData copy to cData failed");
}
void GetKActivationGpuData()
{
cudaError_t status;
status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "cudaMemcpyFromSymbol: GetKActivationGpuData copy From cData failed");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat a = 1.0f / (1.0f + exp(-pData[pos]));
pData[pos] = a;
}
}
void kCalculateSigmoidActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
kCalculateSigmoidActivation_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pData, size);
LAUNCHERROR("kCalculateSigmoidActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
pData[pos] = tanh(pData[pos]);
}
void kCalculateTanhActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
kCalculateTanhActivation_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pData, size);
LAUNCHERROR("kCalculateTanhActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateReluActivation_kernel(NNFloat* pData, uint64_t size)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
pData[pos] = max(0.0f, pData[pos]);
}
void kCalculateReluActivation(NNFloat* pData, uint64_t size)
{
uint32_t blocks = CalculateBlocks(size);
kCalculateReluActivation_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pData, size);
LAUNCHERROR("kCalculateReluActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateLeakyReluActivation_kernel(NNFloat* pData, uint64_t size, NNFloat slope)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat val = pData[pos];
pData[pos] = max(val, val * slope);
}
}
void kCalculateLeakyReluActivation(NNFloat* pData, uint64_t size, NNFloat slope)
{
uint32_t blocks = CalculateBlocks(size);
kCalculateLeakyReluActivation_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pData, size, slope);
LAUNCHERROR("kCalculateLeakyReluActivation_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxActivation_kernel(NNFloat* pData, uint32_t stride)
{
__shared__ unsigned long long int sAccumulator;
__shared__ NNFloat sMaxValue;
if (threadIdx.x == 0)
{
sAccumulator = 0;
sMaxValue = (NNFloat)-99999999.0f;
}
__syncthreads();
// Move data pointer to proper row, calculate activations, and sum them up as well as find the maximum output
pData += blockIdx.x * stride;
uint32_t pos = threadIdx.x;
NNFloat maxValue = (NNFloat)-9999999999.0;
// Calculate max value to improve numerical stability (Theano does this so I'll assume it's a good idea)
while (pos < stride)
{
NNFloat z = pData[pos];
maxValue = max(z, maxValue);
pos += blockDim.x;
}
// Reduce maxValue within and between warps
uint32_t tgx = threadIdx.x & cData._warpMask;
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 1));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 2));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 4));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 8));
maxValue = max(maxValue, __shfl(maxValue, tgx ^ 16));
// Convert to 64-bit int to work around GPU instruction set deficiency
if (tgx == 0)
atomicMax(&sMaxValue, maxValue);
__syncthreads();
maxValue = sMaxValue;
// Calculate sum
pos = threadIdx.x;
NNFloat sum = (NNFloat)0.0;
while (pos < stride)
{
NNFloat z = pData[pos];
sum += exp(z - maxValue);
pos += blockDim.x;
}
// Reduce sums within and between warps
sum += __shfl(sum, tgx ^ 1);
sum += __shfl(sum, tgx ^ 2);
sum += __shfl(sum, tgx ^ 4);
sum += __shfl(sum, tgx ^ 8);
sum += __shfl(sum, tgx ^ 16);
unsigned long long int lsum = llitoulli(llrintf(ERRORSCALEF * sum));
if (tgx == 0)
atomicAdd(&sAccumulator, lsum);
__syncthreads();
NNFloat norm = (NNFloat)1.0 / (NNFloat)((double)sAccumulator * ONEOVERERRORSCALE);
// Normalize output by dividing by sum of activations
pos = threadIdx.x;
while (pos < stride)
{
NNFloat z = pData[pos];
NNFloat a = exp(z - maxValue);
pData[pos] = min((NNFloat)1.0, a * norm);
pos += blockDim.x;
}
}
void kCalculateSoftMaxActivation(NNFloat* pData, uint32_t batch, uint32_t stride)
{
uint32_t warps = getGpu()._threadsPerBlock / getGpu()._warpSize;
kCalculateSoftMaxActivation_kernel<<<batch, getGpu()._threadsPerBlock>>>(pData, stride);
LAUNCHERROR("kCalculateSoftMaxActivation_kernel");
}
|
745cd6cf45654b9f12ca54bdc8b829cfe8df918c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce3.h"
__device__ double merge(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return d1 - d2;
}
//post process result (for things like means etc)
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / extraParams[0] / extraParams[1];
}
extern "C"
__global__ void euclidean_strided_double(int n, int xOffset,int yOffset,double *dx,double *dy,int incx,int incy,double *extraParams,double *result) {
transform_pair(n,xOffset,yOffset,dx,dy,incx,incy,extraParams,result);
}
| 745cd6cf45654b9f12ca54bdc8b829cfe8df918c.cu | #include "reduce3.h"
__device__ double merge(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return old + opOutput;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return d1 - d2;
}
//post process result (for things like means etc)
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / extraParams[0] / extraParams[1];
}
extern "C"
__global__ void euclidean_strided_double(int n, int xOffset,int yOffset,double *dx,double *dy,int incx,int incy,double *extraParams,double *result) {
transform_pair(n,xOffset,yOffset,dx,dy,incx,incy,extraParams,result);
}
|
608116e3cde27c3148cc23c65a287a07579b67be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
namespace at { namespace native {
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
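// Illustrative check (not required by the original file): every 4-byte element
// type (float, int32_t, ...) lowers onto the same OpaqueType<4> instantiation,
// which is what keeps the number of generated kernels small.
static_assert(sizeof(OpaqueType<4>) == 4, "OpaqueType must preserve size");
static_assert(alignof(OpaqueType<4>) == 4, "OpaqueType must preserve alignment");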
// essentially a rewrite of the relevant legacy::launch_kernel parts
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
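// Note: nt is the number of threads per block and vt the number of work-items
// per thread, so each block of the kernel above covers a contiguous chunk of
// nt * vt indices of the iterator.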
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
AT_CUDA_CHECK(hipGetLastError());
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.dont_resize_outputs()
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.dont_resize_outputs()
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
gpuAtomicAdd(lhs, *rhs);
}
);
}
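// Recap of the indexing these kernels implement (shown for a 3-d tensor and
// dim == 0; other dims permute accordingly):
//   gather:       result[i][j][k]            = self[index[i][j][k]][j][k]
//   scatter:      self[index[i][j][k]][j][k] = src[i][j][k]
//   scatter_fill: self[index[i][j][k]][j][k] = src   (a scalar)
//   scatter_add:  self[index[i][j][k]][j][k] += src[i][j][k], via gpuAtomicAdd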
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
}} // namespace at::native
| 608116e3cde27c3148cc23c65a287a07579b67be.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
namespace at { namespace native {
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// essentially a rewrite of the relevant legacy::launch_kernel parts
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::cuda::getCurrentCUDAStream();
_scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
AT_CUDA_CHECK(cudaGetLastError());
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.dont_resize_outputs()
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.dont_resize_outputs()
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
gpuAtomicAdd(lhs, *rhs);
}
);
}
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
}} // namespace at::native
|
28b02ab179c82c8681c88fadeb16edc42824acd7.hip | // !!! This is a file automatically generated by hipify!!!
//****************************************************************************80
//
// file name:
//
// raise_to_power.cu
//
// licensing:
//
// this code is distributed under the mit license.
//
// author:
// bo pace
//
// reference:
// based on https://developer.nvidia.com/blog/even-easier-introduction-cuda/
// an article by mark harris of nvidia
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
//****************************************************************************80
//
// description:
//
// cuda kernal function. raises the elements of one array to the power of the
// elements of another array
//
// last modified:
//
// 17 april 2021
//
// input:
//
// int n - the length of the arrays
// float* arr1 - pointer to array of bases
// float* arr2 - pointer to array of exponents
//
__global__
void raise_to_power(int n, float* arr1, float* arr2)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
arr2[i] = pow(arr1[i], arr2[i]);
}
}
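// note: this is a grid-stride loop. with the launch below
// (arr_size = 1 << 20, blockSize = 256, numBlocks = 4096) the grid already
// covers every element, so each thread runs the body exactly once; the stride
// only matters when the kernel is launched with fewer threads than elements.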
int main(void)
{
int arr_size = 1 << 20; // 1 million
// allocate unified memory -- accessible from cpu or gpu
float* arr1, * arr2;
hipMallocManaged(&arr1, arr_size * sizeof(float));
hipMallocManaged(&arr2, arr_size * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < arr_size; i++)
{
arr1[i] = 3.0f;
arr2[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (arr_size + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( raise_to_power) , dim3(numBlocks), dim3(blockSize), 0, 0, arr_size, arr1, arr2);
// wait for gpu to finish before accessing on host
hipDeviceSynchronize();
// check for errors (all values should be 9.0f)
float maxError = 0.0f;
for (int i = 0; i < arr_size; i++)
{
maxError = fmax(maxError, fabs(arr2[i] - 9.0f));
}
std::cout << "Max error: " << maxError << '\n';
// free memory
hipFree(arr1);
hipFree(arr2);
return 0;
}
| 28b02ab179c82c8681c88fadeb16edc42824acd7.cu | //****************************************************************************80
//
// file name:
//
// raise_to_power.cu
//
// licensing:
//
// this code is distributed under the mit license.
//
// author:
// bo pace
//
// reference:
// based on https://developer.nvidia.com/blog/even-easier-introduction-cuda/
// an article by mark harris of nvidia
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
//****************************************************************************80
//
// description:
//
// cuda kernal function. raises the elements of one array to the power of the
// elements of another array
//
// last modified:
//
// 17 april 2021
//
// input:
//
// int n - the length of the arrays
// float* arr1 - pointer to array of bases
// float* arr2 - pointer to array of exponents
//
__global__
void raise_to_power(int n, float* arr1, float* arr2)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
arr2[i] = pow(arr1[i], arr2[i]);
}
}
int main(void)
{
int arr_size = 1 << 20; // 1 million
// allocate unified memory -- accessible from cpu or gpu
float* arr1, * arr2;
cudaMallocManaged(&arr1, arr_size * sizeof(float));
cudaMallocManaged(&arr2, arr_size * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < arr_size; i++)
{
arr1[i] = 3.0f;
arr2[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (arr_size + blockSize - 1) / blockSize;
raise_to_power <<<numBlocks, blockSize>>> (arr_size, arr1, arr2);
// wait for gpu to finish before accessing on host
cudaDeviceSynchronize();
// check for errors (all values should be 9.0f)
float maxError = 0.0f;
for (int i = 0; i < arr_size; i++)
{
maxError = fmax(maxError, fabs(arr2[i] - 9.0f));
}
std::cout << "Max error: " << maxError << '\n';
// free memory
cudaFree(arr1);
cudaFree(arr2);
return 0;
}
|
f4803267d17905b34350ede9c34fbae037e030c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
typedef uint32_t col_sz; // node color
typedef uint32_t node; // graph node
typedef uint32_t node_sz;
extern "C" {
__global__ void initCurand(hiprandState_t* states, uint32_t seed, uint32_t nElem ) {
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < nElem) {
//states[tid] = hiprandState_t();
hiprand_init( seed, tid, 0, &states[tid] );
}
}
__global__ void initColoring(uint32_t nnodes, uint32_t * coloring_d, float nCol, hiprandState_t * states) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nnodes)
return;
float randnum = hiprand_uniform(&states[idx]);
int color = (int)(randnum * nCol);
//printf("color=%d\n", states[idx].d);
coloring_d[idx] = color;
//coloring_d[idx] = 0;
}
__global__ void conflictChecker(uint32_t nedges, uint32_t * conflictCounter_d, uint32_t * coloring_d, node_sz * edges) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nedges)
return;
uint32_t idx0 = idx * 2;
uint32_t idx1 = idx0 + 1;
uint32_t node0 = edges[idx0];
uint32_t node1 = edges[idx1];
uint32_t col0 = coloring_d[node0];
uint32_t col1 = coloring_d[node1];
conflictCounter_d[idx] = col0 == col1;
}
/**
* Parallel sum reduction inside a single warp
*/
__device__ void warpReduction(volatile float *sdata, uint32_t tid, uint32_t blockSize) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void sumReduction(uint32_t nedges, float * conflictCounter_d) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nedges)
return;
extern __shared__ float sdata[];
uint32_t tid = threadIdx.x;
uint32_t blockSize = blockDim.x;
uint32_t i = (blockSize * 2) * blockIdx.x + tid;
sdata[tid] = conflictCounter_d[i] + conflictCounter_d[i + blockSize];
__syncthreads();
//useless for blocks of dim <= 64
if (blockSize >= 512)
{
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
}
if (tid < 32)
//warpReduction<blockSize>(sdata, tid);
warpReduction(sdata, tid, blockSize);
if (tid == 0)
conflictCounter_d[blockIdx.x] = sdata[0];
}
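/* Illustrative launch for sumReduction (a sketch, not part of the original file; names are
   assumptions). Each block reduces 2 * blockDim.x elements and needs blockDim.x * sizeof(float)
   bytes of dynamic shared memory:
     hipLaunchKernelGGL(sumReduction, dim3(nedges / (2 * blockSize)), dim3(blockSize),
                        blockSize * sizeof(float), 0, nedges, conflictCounter_d);
   This indexing assumes nedges is a multiple of 2 * blockDim.x. */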
__global__ void selectStarColoring(uint32_t nnodes, uint32_t * starColoring_d, float * qStar_d, col_sz nCol, uint32_t * coloring_d, node_sz * cumulDegs, node * neighs, bool * colorsChecker_d, uint32_t * orderedColors_d, hiprandState_t * states, float epsilon) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nnodes)
return;
uint32_t index = cumulDegs[idx]; //index of the node in neighs
uint32_t nneighs = cumulDegs[idx + 1] - index; //number of neighbors
uint32_t nodeCol = coloring_d[idx]; //node color
bool * colorsChecker = &(colorsChecker_d[idx * nCol]); //array used to mark (1 or 0) which colors are occupied by the neighbors
for (int i = 0; i < nneighs; i++) {
colorsChecker[coloring_d[neighs[index + i]]] = 1;
}
uint32_t * orderedColors = &(orderedColors_d[idx * nCol]); //array containing previously occupied colors and then free ones
uint32_t Zp = nCol, Zn = 0; //number of free colors (p) and occupied colors (n)
for (int i = 0; i < nCol; i++)
{
orderedColors[Zn] += i * (1 - (1 - colorsChecker[i]));
orderedColors[Zp - 1] += i * (1 - colorsChecker[i]);
Zn += colorsChecker[i];
Zp -= 1 - colorsChecker[i];
}
Zp = nCol - Zn;
if (!Zp) //manage exception of no free colors
{
#ifdef FIXED_N_COLORS
starColoring_d[idx] = nodeCol;
qStar_d[idx] = 1;
#endif // FIXED_N_COLORS
#ifdef DYNAMIC_N_COLORS
starColoring_d[idx] = nodeCol;
qStar_d[idx] = 1;
#endif // DYNAMIC_N_COLORS
return;
}
float randnum = hiprand_uniform(&states[idx]); //random number
float threshold;
uint32_t selectedIndex = 0; //selected index for orderedColors to select the new color
if (colorsChecker[nodeCol]) //if node color is used by neighbors
{
threshold = 1 - epsilon * Zn; //threshold used to randomly determine whether to extract a free color or a busy one
if (randnum < threshold)
{
selectedIndex = ((randnum * Zp) / threshold) + Zn; //get the selected index
qStar_d[idx] = (1 - epsilon * Zn) / Zp; //save the probability of the color chosen
}
else
{
selectedIndex = ((randnum - threshold) * Zn) / (1 - threshold); //get the selected index
qStar_d[idx] = epsilon; //save the probability of the color chosen
}
starColoring_d[idx] = orderedColors[selectedIndex]; //save the new color
}
else
{
threshold = 1 - epsilon * (nCol - 1); //threshold used to randomly determine whether to extract an occupied color
//or keep the same
if (randnum < threshold)
{
starColoring_d[idx] = nodeCol; //keep the same color
qStar_d[idx] = 1 - ((nCol - 1) * epsilon); //save the probability of the color chosen
}
else
{
selectedIndex = ((randnum - threshold) * Zn) / (1 - threshold); //get the selected index
starColoring_d[idx] = orderedColors[selectedIndex]; //save the new color
qStar_d[idx] = epsilon; //save the probability of the color chosen
}
}
}
}
| f4803267d17905b34350ede9c34fbae037e030c5.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
typedef uint32_t col_sz; // node color
typedef uint32_t node; // graph node
typedef uint32_t node_sz;
extern "C" {
__global__ void initCurand(curandState* states, uint32_t seed, uint32_t nElem ) {
uint32_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < nElem) {
//states[tid] = curandState();
curand_init( seed, tid, 0, &states[tid] );
}
}
__global__ void initColoring(uint32_t nnodes, uint32_t * coloring_d, float nCol, curandState * states) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nnodes)
return;
float randnum = curand_uniform(&states[idx]);
int color = (int)(randnum * nCol);
//printf("color=%d\n", states[idx].d);
coloring_d[idx] = color;
//coloring_d[idx] = 0;
}
__global__ void conflictChecker(uint32_t nedges, uint32_t * conflictCounter_d, uint32_t * coloring_d, node_sz * edges) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nedges)
return;
uint32_t idx0 = idx * 2;
uint32_t idx1 = idx0 + 1;
uint32_t node0 = edges[idx0];
uint32_t node1 = edges[idx1];
uint32_t col0 = coloring_d[node0];
uint32_t col1 = coloring_d[node1];
conflictCounter_d[idx] = col0 == col1;
}
/**
* Parallel sum reduction inside a single warp
*/
__device__ void warpReduction(volatile float *sdata, uint32_t tid, uint32_t blockSize) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void sumReduction(uint32_t nedges, float * conflictCounter_d) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nedges)
return;
extern __shared__ float sdata[];
uint32_t tid = threadIdx.x;
uint32_t blockSize = blockDim.x;
uint32_t i = (blockSize * 2) * blockIdx.x + tid;
sdata[tid] = conflictCounter_d[i] + conflictCounter_d[i + blockSize];
__syncthreads();
//useless for blocks of dim <= 64
if (blockSize >= 512)
{
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
}
if (tid < 32)
//warpReduction<blockSize>(sdata, tid);
warpReduction(sdata, tid, blockSize);
if (tid == 0)
conflictCounter_d[blockIdx.x] = sdata[0];
}
__global__ void selectStarColoring(uint32_t nnodes, uint32_t * starColoring_d, float * qStar_d, col_sz nCol, uint32_t * coloring_d, node_sz * cumulDegs, node * neighs, bool * colorsChecker_d, uint32_t * orderedColors_d, curandState * states, float epsilon) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nnodes)
return;
uint32_t index = cumulDegs[idx]; //index of the node in neighs
uint32_t nneighs = cumulDegs[idx + 1] - index; //number of neighbors
uint32_t nodeCol = coloring_d[idx]; //node color
bool * colorsChecker = &(colorsChecker_d[idx * nCol]); //array used to mark (1 or 0) which colors are occupied by the neighbors
for (int i = 0; i < nneighs; i++) {
colorsChecker[coloring_d[neighs[index + i]]] = 1;
}
uint32_t * orderedColors = &(orderedColors_d[idx * nCol]); //array containing previously occupied colors and then free ones
uint32_t Zp = nCol, Zn = 0; //number of free colors (p) and occupied colors (n)
for (int i = 0; i < nCol; i++)
{
orderedColors[Zn] += i * (1 - (1 - colorsChecker[i]));
orderedColors[Zp - 1] += i * (1 - colorsChecker[i]);
Zn += colorsChecker[i];
Zp -= 1 - colorsChecker[i];
}
Zp = nCol - Zn;
if (!Zp) //manage exception of no free colors
{
#ifdef FIXED_N_COLORS
starColoring_d[idx] = nodeCol;
qStar_d[idx] = 1;
#endif // FIXED_N_COLORS
#ifdef DYNAMIC_N_COLORS
starColoring_d[idx] = nodeCol;
qStar_d[idx] = 1;
#endif // DYNAMIC_N_COLORS
return;
}
float randnum = curand_uniform(&states[idx]); //random number
float threshold;
uint32_t selectedIndex = 0; //selected index for orderedColors to select the new color
if (colorsChecker[nodeCol]) //if node color is used by neighbors
{
threshold = 1 - epsilon * Zn; //threshold used to randomly determine whether to extract a free color or a busy one
if (randnum < threshold)
{
selectedIndex = ((randnum * Zp) / threshold) + Zn; //get the selected index
qStar_d[idx] = (1 - epsilon * Zn) / Zp; //save the probability of the color chosen
}
else
{
selectedIndex = ((randnum - threshold) * Zn) / (1 - threshold); //get the selected index
qStar_d[idx] = epsilon; //save the probability of the color chosen
}
starColoring_d[idx] = orderedColors[selectedIndex]; //save the new color
}
else
{
threshold = 1 - epsilon * (nCol - 1); //threshold used to randomly determine whether to extract an occupied color
//or keep the same
if (randnum < threshold)
{
starColoring_d[idx] = nodeCol; //keep the same color
qStar_d[idx] = 1 - ((nCol - 1) * epsilon); //save the probability of the color chosen
}
else
{
selectedIndex = ((randnum - threshold) * Zn) / (1 - threshold); //get the selected index
starColoring_d[idx] = orderedColors[selectedIndex]; //save the new color
qStar_d[idx] = epsilon; //save the probability of the color chosen
}
}
}
}
|
5e2b5ef982663881dcc4f5aca60dcaceb0611e4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scitbx/array_family/boost_python/flex_fwd.h>
#include <cudatbx/cuda_base.cuh>
#include <simtbx/gpu/simulation.h>
#include <simtbx/gpu/simulation.cuh>
#include <scitbx/array_family/flex_types.h>
#define THREADS_PER_BLOCK_X 128
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y)
namespace simtbx {
namespace gpu {
namespace af = scitbx::af;
//refactor later into helper file
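/* Stages a host array of double into a temporary CUDAREAL buffer (CUDAREAL may be single
   precision), converting element by element on the host before the copy to the device. */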
static hipError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, const double *src, size_t vector_items) {
CUDAREAL * temp = new CUDAREAL[vector_items];
for (size_t i = 0; i < vector_items; i++) {
temp[i] = src[i];
}
hipError_t ret = hipMemcpy(dst, temp, sizeof(*dst) * vector_items, hipMemcpyHostToDevice);
delete[] temp;
return ret;
}
/* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */
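/* Note: vectors here use a 4-element layout: the direction is read from elements 1..3 and,
   on output, element 0 receives the magnitude while elements 1..3 receive the unit direction. */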
double cpu_unitize(const double * vector, double * new_unit_vector) {
double v1 = vector[1];
double v2 = vector[2];
double v3 = vector[3];
double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3);
if (mag != 0.0) {
/* normalize it */
new_unit_vector[0] = mag;
new_unit_vector[1] = v1 / mag;
new_unit_vector[2] = v2 / mag;
new_unit_vector[3] = v3 / mag;
} else {
/* can't normalize, report zero vector */
new_unit_vector[0] = 0.0;
new_unit_vector[1] = 0.0;
new_unit_vector[2] = 0.0;
new_unit_vector[3] = 0.0;
}
return mag;
}
void
exascale_api::show(){
SCITBX_EXAMINE(SIM.roi_xmin);
SCITBX_EXAMINE(SIM.roi_xmax);
SCITBX_EXAMINE(SIM.roi_ymin);
SCITBX_EXAMINE(SIM.roi_ymax);
SCITBX_EXAMINE(SIM.oversample);
SCITBX_EXAMINE(SIM.point_pixel);
SCITBX_EXAMINE(SIM.pixel_size);
SCITBX_EXAMINE(cu_subpixel_size);
SCITBX_EXAMINE(cu_steps);
SCITBX_EXAMINE(SIM.detector_thickstep);
SCITBX_EXAMINE(SIM.detector_thicksteps);
SCITBX_EXAMINE(SIM.detector_thick);
SCITBX_EXAMINE(SIM.detector_attnlen);
SCITBX_EXAMINE(SIM.curved_detector);
SCITBX_EXAMINE(SIM.distance);
SCITBX_EXAMINE(SIM.close_distance);
SCITBX_EXAMINE(SIM.dmin);
SCITBX_EXAMINE(SIM.phi0);
SCITBX_EXAMINE(SIM.phistep);
SCITBX_EXAMINE(SIM.phisteps);
SCITBX_EXAMINE(SIM.sources);
SCITBX_EXAMINE(SIM.mosaic_spread);
SCITBX_EXAMINE(SIM.mosaic_domains);
SCITBX_EXAMINE(SIM.Na);
SCITBX_EXAMINE(SIM.Nb);
SCITBX_EXAMINE(SIM.Nc);
SCITBX_EXAMINE(SIM.fluence);
SCITBX_EXAMINE(SIM.spot_scale);
SCITBX_EXAMINE(SIM.integral_form);
SCITBX_EXAMINE(SIM.default_F);
SCITBX_EXAMINE(SIM.interpolate);
SCITBX_EXAMINE(SIM.nopolar);
SCITBX_EXAMINE(SIM.polarization);
SCITBX_EXAMINE(SIM.fudge);
}
void
exascale_api::add_energy_channel_from_gpu_amplitudes(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
double const& weight
){
cudaSafeCall(hipSetDevice(SIM.device_Id));
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
int source_count = SIM.sources;
af::shared<double> weighted_sources_I = af::shared<double>(source_count);
double* wptr = weighted_sources_I.begin();
for (std::size_t iwt = 0; iwt < source_count; iwt++){wptr[iwt] = weight*(SIM.source_I[iwt]);}
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, wptr, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
// magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration:
cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel];
hipDeviceProp_t deviceProps = { 0 };
cudaSafeCall(hipGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels;
const int vec_len = 4;
// the for loop around panels. Offsets given.
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){
// loop thru panels and increment the array ptrs
hipLaunchKernelGGL(( nanoBraggSpotsCUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.roi_xmin,
SIM.roi_xmax, SIM.roi_ymin, SIM.roi_ymax, SIM.oversample, SIM.point_pixel,
SIM.pixel_size, cu_subpixel_size, cu_steps, SIM.detector_thickstep, SIM.detector_thicksteps,
SIM.detector_thick, SIM.detector_attnlen,
&(gdt.cu_sdet_vector[vec_len * idx_p]),
&(gdt.cu_fdet_vector[vec_len * idx_p]),
&(gdt.cu_odet_vector[vec_len * idx_p]),
&(gdt.cu_pix0_vector[vec_len * idx_p]),
SIM.curved_detector, gdt.metrology.dists[idx_p], gdt.metrology.dists[idx_p], cu_beam_vector,
gdt.metrology.Xbeam[idx_p], gdt.metrology.Ybeam[idx_p],
SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector,
SIM.sources, cu_source_X, cu_source_Y, cu_source_Z,
cu_source_I, cu_source_lambda, cu_a0, cu_b0,
cu_c0, SIM.xtal_shape, SIM.mosaic_spread, SIM.mosaic_domains, cu_mosaic_umats,
SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell,
cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence,
simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F,
SIM.interpolate, cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar,
cu_polar_vector, SIM.polarization, SIM.fudge,
/* &(gdt.cu_maskimage[panel_size * idx_p]), */
NULL,
&(gdt.cu_floatimage[panel_size * idx_p]) /*out*/,
&(gdt.cu_omega_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_max_I_x_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_max_I_y_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_rangemap[panel_size * idx_p]) /*out*/);
cudaSafeCall(hipPeekAtLastError());
}
cudaSafeCall(hipDeviceSynchronize());
//don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer
cu_current_channel_Fhkl = NULL;
hipLaunchKernelGGL(( add_array_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
}
void
exascale_api::add_energy_channel_mask_allpanel(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
af::shared<bool> all_panel_mask
){
// here or there, need to convert the all_panel_mask (3D map) into a 1D list of accepted pixels
// coordinates for the active pixel list are absolute offsets into the detector array
af::shared<std::size_t> active_pixel_list;
const bool* jptr = all_panel_mask.begin();
for (int j=0; j < all_panel_mask.size(); ++j){
if (jptr[j]) {
active_pixel_list.push_back(j);
}
}
add_energy_channel_mask_allpanel(
ichannel, gec, gdt, active_pixel_list);
}
void
exascale_api::add_energy_channel_mask_allpanel(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
af::shared<std::size_t> const active_pixel_list
){
cudaSafeCall(hipSetDevice(SIM.device_Id));
gdt.set_active_pixels_on_GPU(active_pixel_list);
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
// magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration:
cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel];
hipDeviceProp_t deviceProps = { 0 };
cudaSafeCall(hipGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
const int vec_len = 4;
// for call for all panels at the same time
hipLaunchKernelGGL(( debranch_maskall_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
gdt.cu_n_panels, gdt.cu_slow_pixels, gdt.cu_fast_pixels, active_pixel_list.size(),
SIM.oversample, SIM.point_pixel,
SIM.pixel_size, cu_subpixel_size, cu_steps,
SIM.detector_thickstep, SIM.detector_thicksteps,
SIM.detector_thick, SIM.detector_attnlen,
vec_len,
gdt.cu_sdet_vector,
gdt.cu_fdet_vector,
gdt.cu_odet_vector,
gdt.cu_pix0_vector,
gdt.cu_distance, gdt.cu_distance, cu_beam_vector,
gdt.cu_Xbeam, gdt.cu_Ybeam,
SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector,
SIM.sources, cu_source_X, cu_source_Y, cu_source_Z,
cu_source_I, cu_source_lambda, cu_a0, cu_b0,
cu_c0, SIM.xtal_shape, SIM.mosaic_domains, cu_mosaic_umats,
SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell,
cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence,
simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F,
cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar,
cu_polar_vector, SIM.polarization, SIM.fudge,
gdt.cu_active_pixel_list,
gdt.cu_floatimage /*out*/,
gdt.cu_omega_reduction /*out*/,
gdt.cu_max_I_x_reduction /*out*/,
gdt.cu_max_I_y_reduction /*out*/,
gdt.cu_rangemap /*out*/);
cudaSafeCall(hipPeekAtLastError());
cudaSafeCall(hipDeviceSynchronize());
//don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer
cu_current_channel_Fhkl = NULL;
hipLaunchKernelGGL(( add_array_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
}
void
exascale_api::add_background(simtbx::gpu::gpu_detector & gdt, int const& override_source){
cudaSafeCall(hipSetDevice(SIM.device_Id));
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
CUDAREAL * cu_stol_of;
cudaSafeCall(hipMalloc((void ** )&cu_stol_of, sizeof(*cu_stol_of) * SIM.stols));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_stol_of, SIM.stol_of, SIM.stols));
CUDAREAL * cu_Fbg_of;
cudaSafeCall(hipMalloc((void ** )&cu_Fbg_of, sizeof(*cu_Fbg_of) * SIM.stols));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_Fbg_of, SIM.Fbg_of, SIM.stols));
hipDeviceProp_t deviceProps = { 0 };
cudaSafeCall(hipGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
// initialize the device memory within a kernel.
// modify the arguments to initialize multipanel detector.
hipLaunchKernelGGL(( nanoBraggSpotsInitCUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
gdt.cu_n_panels * gdt.cu_slow_pixels, gdt.cu_fast_pixels,
gdt.cu_floatimage, gdt.cu_omega_reduction,
gdt.cu_max_I_x_reduction, gdt.cu_max_I_y_reduction,
gdt.cu_rangemap);
cudaSafeCall(hipPeekAtLastError());
cudaSafeCall(hipDeviceSynchronize());
std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels;
const int vec_len = 4;
// the for loop around panels. Offsets given.
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){
hipLaunchKernelGGL(( add_background_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, SIM.sources,
SIM.oversample, override_source,
SIM.pixel_size, gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.detector_thicksteps,
SIM.detector_thickstep, SIM.detector_attnlen,
&(gdt.cu_sdet_vector[vec_len * idx_p]),
&(gdt.cu_fdet_vector[vec_len * idx_p]),
&(gdt.cu_odet_vector[vec_len * idx_p]),
&(gdt.cu_pix0_vector[vec_len * idx_p]),
gdt.metrology.dists[idx_p], SIM.point_pixel, SIM.detector_thick,
cu_source_X, cu_source_Y, cu_source_Z,
cu_source_lambda, cu_source_I,
SIM.stols, cu_stol_of, cu_Fbg_of,
SIM.nopolar, SIM.polarization, cu_polar_vector,
simtbx::nanoBragg::r_e_sqr, SIM.fluence, SIM.amorphous_molecules,
&(gdt.cu_floatimage[panel_size * idx_p]) /*out*/);
cudaSafeCall(hipPeekAtLastError());
}
cudaSafeCall(hipDeviceSynchronize());
hipLaunchKernelGGL(( add_array_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
cudaSafeCall(hipFree(cu_stol_of));
cudaSafeCall(hipFree(cu_Fbg_of));
}
void
exascale_api::allocate(){
cudaSafeCall(hipSetDevice(SIM.device_Id));
/* water_size not defined in class, CLI argument, defaults to 0 */
double water_size = 0.0;
/* missing constants */
double water_F = 2.57;
double water_MW = 18.0;
/* make sure we are normalizing with the right number of sub-steps */
int nb_steps = SIM.phisteps*SIM.mosaic_domains*SIM.oversample*SIM.oversample;
double nb_subpixel_size = SIM.pixel_size/SIM.oversample;
/*create transfer arguments to device space*/
cu_subpixel_size = nb_subpixel_size; //check for conflict?
cu_steps = nb_steps; //check for conflict?
/* presumably thickness and attenuation can be migrated to the gpu detector class XXX FIXME*/
//cu_detector_thick = SIM.detector_thick;
//cu_detector_mu = SIM.detector_attnlen; // synonyms
//cu_distance = SIM.distance; /* distance and close distance, detector properties? XXX FIXME */
//cu_close_distance = SIM.close_distance;
cu_water_size = water_size;
cu_water_F = water_F;
cu_water_MW = water_MW;
const int vector_length = 4;
int cu_sources = SIM.sources;
int cu_mosaic_domains = SIM.mosaic_domains;
cudaSafeCall(hipMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, SIM.beam_vector, vector_length));
cudaSafeCall(hipMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, SIM.spindle_vector, vector_length));
cudaSafeCall(hipMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_a0, SIM.a0, vector_length));
cudaSafeCall(hipMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_b0, SIM.b0, vector_length));
cudaSafeCall(hipMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_c0, SIM.c0, vector_length));
// Unitize polar vector before sending it to the GPU.
// Optimization: do it only once here rather than multiple times per pixel on the GPU.
double polar_vector_unitized[4];
cpu_unitize(SIM.polar_vector, polar_vector_unitized);
cudaSafeCall(hipMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length));
cudaSafeCall(hipMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_X, SIM.source_X, cu_sources));
cudaSafeCall(hipMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Y, SIM.source_Y, cu_sources));
cudaSafeCall(hipMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Z, SIM.source_Z, cu_sources));
cudaSafeCall(hipMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, cu_sources));
cudaSafeCall(hipMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, cu_sources));
cudaSafeCall(hipMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * cu_mosaic_domains * 9));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, SIM.mosaic_umats, cu_mosaic_domains * 9));
};
exascale_api::~exascale_api(){
cudaSafeCall(hipSetDevice(SIM.device_Id));
cudaSafeCall(hipFree(cu_beam_vector));
cudaSafeCall(hipFree(cu_spindle_vector));
cudaSafeCall(hipFree(cu_source_X));
cudaSafeCall(hipFree(cu_source_Y));
cudaSafeCall(hipFree(cu_source_Z));
cudaSafeCall(hipFree(cu_source_I));
cudaSafeCall(hipFree(cu_source_lambda));
cudaSafeCall(hipFree(cu_a0));
cudaSafeCall(hipFree(cu_b0));
cudaSafeCall(hipFree(cu_c0));
cudaSafeCall(hipFree(cu_mosaic_umats));
cudaSafeCall(hipFree(cu_polar_vector));
}
} // gpu
} // simtbx
| 5e2b5ef982663881dcc4f5aca60dcaceb0611e4d.cu | #include <scitbx/array_family/boost_python/flex_fwd.h>
#include <cudatbx/cuda_base.cuh>
#include <simtbx/gpu/simulation.h>
#include <simtbx/gpu/simulation.cuh>
#include <scitbx/array_family/flex_types.h>
#define THREADS_PER_BLOCK_X 128
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y)
namespace simtbx {
namespace gpu {
namespace af = scitbx::af;
//refactor later into helper file
static cudaError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, const double *src, size_t vector_items) {
CUDAREAL * temp = new CUDAREAL[vector_items];
for (size_t i = 0; i < vector_items; i++) {
temp[i] = src[i];
}
cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * vector_items, cudaMemcpyHostToDevice);
delete[] temp;
return ret;
}
/* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */
double cpu_unitize(const double * vector, double * new_unit_vector) {
double v1 = vector[1];
double v2 = vector[2];
double v3 = vector[3];
double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3);
if (mag != 0.0) {
/* normalize it */
new_unit_vector[0] = mag;
new_unit_vector[1] = v1 / mag;
new_unit_vector[2] = v2 / mag;
new_unit_vector[3] = v3 / mag;
} else {
/* can't normalize, report zero vector */
new_unit_vector[0] = 0.0;
new_unit_vector[1] = 0.0;
new_unit_vector[2] = 0.0;
new_unit_vector[3] = 0.0;
}
return mag;
}
void
exascale_api::show(){
SCITBX_EXAMINE(SIM.roi_xmin);
SCITBX_EXAMINE(SIM.roi_xmax);
SCITBX_EXAMINE(SIM.roi_ymin);
SCITBX_EXAMINE(SIM.roi_ymax);
SCITBX_EXAMINE(SIM.oversample);
SCITBX_EXAMINE(SIM.point_pixel);
SCITBX_EXAMINE(SIM.pixel_size);
SCITBX_EXAMINE(cu_subpixel_size);
SCITBX_EXAMINE(cu_steps);
SCITBX_EXAMINE(SIM.detector_thickstep);
SCITBX_EXAMINE(SIM.detector_thicksteps);
SCITBX_EXAMINE(SIM.detector_thick);
SCITBX_EXAMINE(SIM.detector_attnlen);
SCITBX_EXAMINE(SIM.curved_detector);
SCITBX_EXAMINE(SIM.distance);
SCITBX_EXAMINE(SIM.close_distance);
SCITBX_EXAMINE(SIM.dmin);
SCITBX_EXAMINE(SIM.phi0);
SCITBX_EXAMINE(SIM.phistep);
SCITBX_EXAMINE(SIM.phisteps);
SCITBX_EXAMINE(SIM.sources);
SCITBX_EXAMINE(SIM.mosaic_spread);
SCITBX_EXAMINE(SIM.mosaic_domains);
SCITBX_EXAMINE(SIM.Na);
SCITBX_EXAMINE(SIM.Nb);
SCITBX_EXAMINE(SIM.Nc);
SCITBX_EXAMINE(SIM.fluence);
SCITBX_EXAMINE(SIM.spot_scale);
SCITBX_EXAMINE(SIM.integral_form);
SCITBX_EXAMINE(SIM.default_F);
SCITBX_EXAMINE(SIM.interpolate);
SCITBX_EXAMINE(SIM.nopolar);
SCITBX_EXAMINE(SIM.polarization);
SCITBX_EXAMINE(SIM.fudge);
}
void
exascale_api::add_energy_channel_from_gpu_amplitudes(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
double const& weight
){
cudaSafeCall(cudaSetDevice(SIM.device_Id));
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
int source_count = SIM.sources;
af::shared<double> weighted_sources_I = af::shared<double>(source_count);
double* wptr = weighted_sources_I.begin();
for (std::size_t iwt = 0; iwt < source_count; iwt++){wptr[iwt] = weight*(SIM.source_I[iwt]);}
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, wptr, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
// magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration:
cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel];
cudaDeviceProp deviceProps = { 0 };
cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels;
const int vec_len = 4;
// the for loop around panels. Offsets given.
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){
// loop thru panels and increment the array ptrs
nanoBraggSpotsCUDAKernel<<<numBlocks, threadsPerBlock>>>(
gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.roi_xmin,
SIM.roi_xmax, SIM.roi_ymin, SIM.roi_ymax, SIM.oversample, SIM.point_pixel,
SIM.pixel_size, cu_subpixel_size, cu_steps, SIM.detector_thickstep, SIM.detector_thicksteps,
SIM.detector_thick, SIM.detector_attnlen,
&(gdt.cu_sdet_vector[vec_len * idx_p]),
&(gdt.cu_fdet_vector[vec_len * idx_p]),
&(gdt.cu_odet_vector[vec_len * idx_p]),
&(gdt.cu_pix0_vector[vec_len * idx_p]),
SIM.curved_detector, gdt.metrology.dists[idx_p], gdt.metrology.dists[idx_p], cu_beam_vector,
gdt.metrology.Xbeam[idx_p], gdt.metrology.Ybeam[idx_p],
SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector,
SIM.sources, cu_source_X, cu_source_Y, cu_source_Z,
cu_source_I, cu_source_lambda, cu_a0, cu_b0,
cu_c0, SIM.xtal_shape, SIM.mosaic_spread, SIM.mosaic_domains, cu_mosaic_umats,
SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell,
cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence,
simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F,
SIM.interpolate, cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar,
cu_polar_vector, SIM.polarization, SIM.fudge,
/* &(gdt.cu_maskimage[panel_size * idx_p]), */
NULL,
&(gdt.cu_floatimage[panel_size * idx_p]) /*out*/,
&(gdt.cu_omega_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_max_I_x_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_max_I_y_reduction[panel_size * idx_p]) /*out*/,
&(gdt.cu_rangemap[panel_size * idx_p]) /*out*/);
cudaSafeCall(cudaPeekAtLastError());
}
cudaSafeCall(cudaDeviceSynchronize());
//don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer
cu_current_channel_Fhkl = NULL;
add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
}
void
exascale_api::add_energy_channel_mask_allpanel(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
af::shared<bool> all_panel_mask
){
// here or there, need to convert the all_panel_mask (3D map) into a 1D list of accepted pixels
// coordinates for the active pixel list are absolute offsets into the detector array
af::shared<std::size_t> active_pixel_list;
const bool* jptr = all_panel_mask.begin();
for (int j=0; j < all_panel_mask.size(); ++j){
if (jptr[j]) {
active_pixel_list.push_back(j);
}
}
add_energy_channel_mask_allpanel(
ichannel, gec, gdt, active_pixel_list);
}
void
exascale_api::add_energy_channel_mask_allpanel(
int const& ichannel,
simtbx::gpu::gpu_energy_channels & gec,
simtbx::gpu::gpu_detector & gdt,
af::shared<std::size_t> const active_pixel_list
){
cudaSafeCall(cudaSetDevice(SIM.device_Id));
gdt.set_active_pixels_on_GPU(active_pixel_list);
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
// magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration:
cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel];
cudaDeviceProp deviceProps = { 0 };
cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
const int vec_len = 4;
// for call for all panels at the same time
debranch_maskall_CUDAKernel<<<numBlocks, threadsPerBlock>>>(
gdt.cu_n_panels, gdt.cu_slow_pixels, gdt.cu_fast_pixels, active_pixel_list.size(),
SIM.oversample, SIM.point_pixel,
SIM.pixel_size, cu_subpixel_size, cu_steps,
SIM.detector_thickstep, SIM.detector_thicksteps,
SIM.detector_thick, SIM.detector_attnlen,
vec_len,
gdt.cu_sdet_vector,
gdt.cu_fdet_vector,
gdt.cu_odet_vector,
gdt.cu_pix0_vector,
gdt.cu_distance, gdt.cu_distance, cu_beam_vector,
gdt.cu_Xbeam, gdt.cu_Ybeam,
SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector,
SIM.sources, cu_source_X, cu_source_Y, cu_source_Z,
cu_source_I, cu_source_lambda, cu_a0, cu_b0,
cu_c0, SIM.xtal_shape, SIM.mosaic_domains, cu_mosaic_umats,
SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell,
cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence,
simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F,
cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar,
cu_polar_vector, SIM.polarization, SIM.fudge,
gdt.cu_active_pixel_list,
gdt.cu_floatimage /*out*/,
gdt.cu_omega_reduction /*out*/,
gdt.cu_max_I_x_reduction /*out*/,
gdt.cu_max_I_y_reduction /*out*/,
gdt.cu_rangemap /*out*/);
cudaSafeCall(cudaPeekAtLastError());
cudaSafeCall(cudaDeviceSynchronize());
//don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer
cu_current_channel_Fhkl = NULL;
add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
}
void
exascale_api::add_background(simtbx::gpu::gpu_detector & gdt, int const& override_source){
cudaSafeCall(cudaSetDevice(SIM.device_Id));
// transfer source_I, source_lambda
// the int arguments are for sizes of the arrays
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources));
CUDAREAL * cu_stol_of;
cudaSafeCall(cudaMalloc((void ** )&cu_stol_of, sizeof(*cu_stol_of) * SIM.stols));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_stol_of, SIM.stol_of, SIM.stols));
CUDAREAL * cu_Fbg_of;
cudaSafeCall(cudaMalloc((void ** )&cu_Fbg_of, sizeof(*cu_Fbg_of) * SIM.stols));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_Fbg_of, SIM.Fbg_of, SIM.stols));
cudaDeviceProp deviceProps = { 0 };
cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id));
int smCount = deviceProps.multiProcessorCount;
dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y);
dim3 numBlocks(smCount * 8, 1);
// initialize the device memory within a kernel.
// modify the arguments to initialize multipanel detector.
nanoBraggSpotsInitCUDAKernel<<<numBlocks, threadsPerBlock>>>(
gdt.cu_n_panels * gdt.cu_slow_pixels, gdt.cu_fast_pixels,
gdt.cu_floatimage, gdt.cu_omega_reduction,
gdt.cu_max_I_x_reduction, gdt.cu_max_I_y_reduction,
gdt.cu_rangemap);
cudaSafeCall(cudaPeekAtLastError());
cudaSafeCall(cudaDeviceSynchronize());
std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels;
const int vec_len = 4;
// the for loop around panels. Offsets given.
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){
add_background_CUDAKernel<<<numBlocks, threadsPerBlock>>>(SIM.sources,
SIM.oversample, override_source,
SIM.pixel_size, gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.detector_thicksteps,
SIM.detector_thickstep, SIM.detector_attnlen,
&(gdt.cu_sdet_vector[vec_len * idx_p]),
&(gdt.cu_fdet_vector[vec_len * idx_p]),
&(gdt.cu_odet_vector[vec_len * idx_p]),
&(gdt.cu_pix0_vector[vec_len * idx_p]),
gdt.metrology.dists[idx_p], SIM.point_pixel, SIM.detector_thick,
cu_source_X, cu_source_Y, cu_source_Z,
cu_source_lambda, cu_source_I,
SIM.stols, cu_stol_of, cu_Fbg_of,
SIM.nopolar, SIM.polarization, cu_polar_vector,
simtbx::nanoBragg::r_e_sqr, SIM.fluence, SIM.amorphous_molecules,
&(gdt.cu_floatimage[panel_size * idx_p]) /*out*/);
cudaSafeCall(cudaPeekAtLastError());
}
cudaSafeCall(cudaDeviceSynchronize());
add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage,
gdt.cu_floatimage,
gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels);
cudaSafeCall(cudaFree(cu_stol_of));
cudaSafeCall(cudaFree(cu_Fbg_of));
}
void
exascale_api::allocate(){
cudaSafeCall(cudaSetDevice(SIM.device_Id));
/* water_size not defined in class, CLI argument, defaults to 0 */
double water_size = 0.0;
/* missing constants */
double water_F = 2.57;
double water_MW = 18.0;
/* make sure we are normalizing with the right number of sub-steps */
int nb_steps = SIM.phisteps*SIM.mosaic_domains*SIM.oversample*SIM.oversample;
double nb_subpixel_size = SIM.pixel_size/SIM.oversample;
/*create transfer arguments to device space*/
cu_subpixel_size = nb_subpixel_size; //check for conflict?
cu_steps = nb_steps; //check for conflict?
/* presumably thickness and attenuation can be migrated to the gpu detector class XXX FIXME*/
//cu_detector_thick = SIM.detector_thick;
//cu_detector_mu = SIM.detector_attnlen; // synonyms
//cu_distance = SIM.distance; /* distance and close distance, detector properties? XXX FIXME */
//cu_close_distance = SIM.close_distance;
cu_water_size = water_size;
cu_water_F = water_F;
cu_water_MW = water_MW;
const int vector_length = 4;
int cu_sources = SIM.sources;
int cu_mosaic_domains = SIM.mosaic_domains;
cudaSafeCall(cudaMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, SIM.beam_vector, vector_length));
cudaSafeCall(cudaMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, SIM.spindle_vector, vector_length));
cudaSafeCall(cudaMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_a0, SIM.a0, vector_length));
cudaSafeCall(cudaMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_b0, SIM.b0, vector_length));
cudaSafeCall(cudaMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_c0, SIM.c0, vector_length));
// Unitize polar vector before sending it to the GPU.
// Optimization: do it only once here rather than multiple times per pixel on the GPU.
double polar_vector_unitized[4];
cpu_unitize(SIM.polar_vector, polar_vector_unitized);
cudaSafeCall(cudaMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length));
cudaSafeCall(cudaMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_X, SIM.source_X, cu_sources));
cudaSafeCall(cudaMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Y, SIM.source_Y, cu_sources));
cudaSafeCall(cudaMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Z, SIM.source_Z, cu_sources));
cudaSafeCall(cudaMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, cu_sources));
cudaSafeCall(cudaMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * cu_sources));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, cu_sources));
cudaSafeCall(cudaMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * cu_mosaic_domains * 9));
cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, SIM.mosaic_umats, cu_mosaic_domains * 9));
};
exascale_api::~exascale_api(){
cudaSafeCall(cudaSetDevice(SIM.device_Id));
cudaSafeCall(cudaFree(cu_beam_vector));
cudaSafeCall(cudaFree(cu_spindle_vector));
cudaSafeCall(cudaFree(cu_source_X));
cudaSafeCall(cudaFree(cu_source_Y));
cudaSafeCall(cudaFree(cu_source_Z));
cudaSafeCall(cudaFree(cu_source_I));
cudaSafeCall(cudaFree(cu_source_lambda));
cudaSafeCall(cudaFree(cu_a0));
cudaSafeCall(cudaFree(cu_b0));
cudaSafeCall(cudaFree(cu_c0));
cudaSafeCall(cudaFree(cu_mosaic_umats));
cudaSafeCall(cudaFree(cu_polar_vector));
}
} // gpu
} // simtbx
|
541b9e4f3d283bf90eddf46cec9acc152f899618.hip | // !!! This is a file automatically generated by hipify!!!
/*--
This file is a part of libcubwt, a library for CUDA accelerated
burrows wheeler transform construction.
Copyright (c) 2022-2023 Ilya Grebnov <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please see the file LICENSE for full copyright and license details.
--*/
#include "libcubwt.cuh"
#if defined(_MSC_VER) && defined(__INTELLISENSE__)
#define __launch_bounds__(block_size) /* */
#define __HIPCC__
#include <vector_functions.h>
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#endif
#include <hipcub/hipcub.hpp>
#include <hip/hip_runtime.h>
#include <utility>
#if defined(__GNUC__) || defined(__clang__) || defined(__HIPCC__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define RESTRICT __restrict
#else
#define RESTRICT /* */
#endif
#ifndef __CUDA_ARCH__
#define CUDA_DEVICE_ARCH 0
#else
#define CUDA_DEVICE_ARCH __CUDA_ARCH__
#endif
#if CUDA_DEVICE_ARCH == 750
#define CUDA_SM_THREADS (1024)
#elif CUDA_DEVICE_ARCH == 860 || CUDA_DEVICE_ARCH == 870 || CUDA_DEVICE_ARCH == 890
#define CUDA_SM_THREADS (1536)
#else
#define CUDA_SM_THREADS (2048)
#endif
#if CUDA_DEVICE_ARCH == 860 || CUDA_DEVICE_ARCH == 870 || CUDA_DEVICE_ARCH == 890
#define CUDA_BLOCK_THREADS (768)
#else
#define CUDA_BLOCK_THREADS (512)
#endif
#define CUDA_WARP_THREADS (32)
#define CUDA_DEVICE_PADDING (12 * 768)
typedef struct LIBCUBWT_DEVICE_STORAGE
{
void * device_rsort_temp_storage;
size_t device_rsort_temp_storage_size;
void * device_ssort_temp_storage;
size_t device_ssort_temp_storage_size;
uint8_t * device_T;
uint8_t * device_heads;
uint32_t * device_SA;
uint32_t * device_ISA;
uint32_t * device_keys;
uint32_t * device_offsets;
uint32_t * device_temp_SA;
uint32_t * device_temp_ISA;
uint32_t * device_temp_keys;
uint64_t * device_SA_temp_SA;
uint64_t * device_keys_temp_keys;
uint64_t * device_offsets_ISA;
uint4 * device_descriptors_large;
uint4 * device_descriptors_copy;
uint2 * device_descriptors_small;
void * device_storage;
int32_t device_L2_cache_bits;
void * host_pinned_storage;
size_t host_pinned_storage_size;
int64_t max_length;
uint32_t num_unsorted_segments;
uint32_t num_unsorted_suffixes;
uint32_t cuda_block_threads;
hipStream_t cuda_stream;
} LIBCUBWT_DEVICE_STORAGE;
static int64_t libcubwt_get_error_code(hipError_t status)
{
return
status == hipErrorMemoryAllocation ? LIBCUBWT_GPU_NOT_ENOUGH_MEMORY :
status == hipErrorDevicesUnavailable ? LIBCUBWT_GPU_NOT_SUPPORTED :
status == hipErrorNoDevice ? LIBCUBWT_GPU_NOT_SUPPORTED :
LIBCUBWT_GPU_ERROR;
}
static hipError_t libcubwt_cuda_safe_call(const char * filename, int32_t line, hipError_t result, hipError_t status = hipSuccess)
{
#if !defined(NDEBUG)
if (result != hipSuccess)
{
fprintf(stderr, "%s(%d): libcubwt_cuda_safe_call failed %d: '%s'.\n", filename, line, result, hipGetErrorString(result));
fflush(stderr);
}
#else
(void)(filename); (void)(line);
#endif
return result != hipSuccess ? result : status;
}
template <typename T>
__device__ __forceinline__ T libcubwt_warp_reduce_sum(T value)
{
#if CUDA_DEVICE_ARCH >= 800 && !defined(__CUDA__)
return __reduce_add_sync((uint32_t)-1, value);
#else
#pragma unroll
for (uint32_t mask = CUDA_WARP_THREADS / 2; mask > 0; mask >>= 1)
{
value = hipcub::Sum()(value, __shfl_xor_sync((uint32_t)-1, value, mask, CUDA_WARP_THREADS));
}
return value;
#endif
}
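/* Note: the fallback path above is a butterfly reduction: each step XORs the lane id with a
   mask of 16, 8, 4, 2, 1, exchanging partial results across the warp so that every lane ends
   up holding the full warp-wide sum. */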
template <typename T>
__device__ __forceinline__ T libcubwt_warp_reduce_max(T value)
{
#if CUDA_DEVICE_ARCH >= 800 && !defined(__CUDA__)
return __reduce_max_sync((uint32_t)-1, value);
#else
#pragma unroll
for (uint32_t mask = CUDA_WARP_THREADS / 2; mask > 0; mask >>= 1)
{
value = hipcub::Max()(value, __shfl_xor_sync((uint32_t)-1, value, mask, CUDA_WARP_THREADS));
}
return value;
#endif
}
template <typename T>
__device__ __forceinline__ void libcubwt_delay_or_prevent_hoisting(T delay)
{
#if CUDA_DEVICE_ARCH >= 700
__nanosleep(delay);
#else
__threadfence_block(); (void)(delay);
#endif
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_gather_values_uint32_kernel(const uint32_t * device_idx, const uint32_t * RESTRICT device_src, uint32_t * device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
device_idx += block_index; device_dst += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 4)
{
const uint4 indexes = *(uint4 *)(device_idx + threadIdx.x * 4);
*(uint4 *)(device_dst + threadIdx.x * 4) = make_uint4(
__ldg(device_src + indexes.x),
__ldg(device_src + indexes.y),
__ldg(device_src + indexes.z),
__ldg(device_src + indexes.w));
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[thread_index] = __ldg(device_src + device_idx[thread_index]);
}
}
}
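/* Note: this kernel and the scatter/permute kernels below share a pattern: fully populated
   blocks use vectorized uint4/uint2 accesses (several elements per thread), while the final,
   partially filled block falls back to a strided scalar loop. */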
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_scatter_values_uint32_kernel(const uint32_t * RESTRICT device_idx, const uint32_t * RESTRICT device_src, uint32_t * RESTRICT device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
device_idx += block_index; device_src += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 4)
{
const uint4 indexes = __ldg((uint4 *)(device_idx + threadIdx.x * 4));
const uint4 values = __ldg((uint4 *)(device_src + threadIdx.x * 4));
device_dst[indexes.x] = values.x;
device_dst[indexes.y] = values.y;
device_dst[indexes.z] = values.z;
device_dst[indexes.w] = values.w;
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_permute_block_values_uint32_kernel(const uint32_t * RESTRICT device_idx, const uint32_t * RESTRICT device_src, uint32_t * RESTRICT device_dst, uint32_t n)
{
__shared__ __align__(32) uint32_t cache[16 * CUDA_BLOCK_THREADS];
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 16;
device_idx += block_index; device_src += block_index; device_dst += block_index; n -= block_index;
if (n >= CUDA_BLOCK_THREADS * 16)
{
{
const uint32_t * RESTRICT thread_idx = device_idx + threadIdx.x * 4;
const uint32_t * RESTRICT thread_src = device_src + threadIdx.x * 4;
uint32_t * RESTRICT thread_cache = cache - block_index;
#pragma unroll
for (uint32_t round = 0; round < 4; round += 1)
{
const uint4 indexes = __ldg((uint4 *)(thread_idx));
const uint4 values = __ldg((uint4 *)(thread_src));
thread_cache[indexes.x] = values.x;
thread_cache[indexes.y] = values.y;
thread_cache[indexes.z] = values.z;
thread_cache[indexes.w] = values.w;
thread_idx += 4 * CUDA_BLOCK_THREADS; thread_src += 4 * CUDA_BLOCK_THREADS;
}
}
__syncthreads();
{
const uint32_t * RESTRICT thread_cache = cache + threadIdx.x * 4;
uint32_t * RESTRICT thread_dst = device_dst + threadIdx.x * 4;
#pragma unroll
for (uint32_t round = 0; round < 4; round += 1)
{
*(uint4 *)(thread_dst) = *(uint4 *)(thread_cache);
thread_cache += 4 * CUDA_BLOCK_THREADS; thread_dst += 4 * CUDA_BLOCK_THREADS;
}
}
}
else
{
{
uint32_t * RESTRICT thread_cache = cache - block_index;
for (uint32_t thread_index = threadIdx.x; thread_index < n; thread_index += CUDA_BLOCK_THREADS)
{
thread_cache[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
__syncthreads();
{
for (uint32_t thread_index = threadIdx.x; thread_index < n; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[thread_index] = cache[thread_index];
}
}
}
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_scatter_values_uint64_kernel(const uint32_t * RESTRICT device_idx, const uint64_t * RESTRICT device_src, uint64_t * RESTRICT device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 2;
device_idx += block_index; device_src += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 2)
{
const uint2 indexes = __ldg((uint2 *)(device_idx + threadIdx.x * 2));
const ulonglong2 values = __ldg((ulonglong2 *)(device_src + threadIdx.x * 2));
device_dst[indexes.x] = values.x;
device_dst[indexes.y] = values.y;
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
}
static hipError_t libcubwt_gather_scatter_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_src_idx, uint32_t * device_src, uint32_t * device_dst_idx, uint32_t * device_dst, int64_t m, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
hipError_t status = hipSuccess;
cub::DoubleBuffer<uint32_t> db_src_index_value(device_src_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_dst_index(device_dst_idx, device_temp2);
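// The keys below are only partially radix-sorted, from sort_start_bit up to sort_end_bit (the
// number of bits needed to address n elements); low bits are left unsorted when the index range
// they span already fits in the L2 cache (the "- 2" accounts for 4-byte elements). This appears
// intended to give the following gather/scatter passes cache-friendly locality without the cost
// of a full sort.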
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = ::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_src_index_value, db_dst_index,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == hipSuccess)
{
int64_t n_gather_scatter_blocks = (m + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
hipLaunchKernelGGL(( libcubwt_gather_values_uint32_kernel), dim3((uint32_t)n_gather_scatter_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_src_index_value.Current(), device_src, db_src_index_value.Current(), (uint32_t)m);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_dst_index, db_src_index_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == hipSuccess)
{
hipLaunchKernelGGL(( libcubwt_scatter_values_uint32_kernel), dim3((uint32_t)n_gather_scatter_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_dst_index.Current(), db_src_index_value.Current(), device_dst, (uint32_t)m);
}
}
return status;
}
static hipError_t libcubwt_scatter_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_idx, uint32_t * device_src, uint32_t * device_dst, int64_t m, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
hipError_t status = hipSuccess;
cub::DoubleBuffer<uint32_t> db_index(device_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_value(device_src, device_temp2);
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = ::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == hipSuccess)
{
int64_t n_scatter_blocks = (m + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
hipLaunchKernelGGL(( libcubwt_scatter_values_uint32_kernel), dim3((uint32_t)n_scatter_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_index.Current(), db_value.Current(), device_dst, (uint32_t)m);
}
return status;
}
static hipError_t libcubwt_permute_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_idx, uint32_t * device_src, uint32_t * device_dst, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
hipError_t status = hipSuccess;
cub::DoubleBuffer<uint32_t> db_index(device_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_value(device_src, device_temp2);
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = ::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)n,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == hipSuccess)
{
if (((storage->cuda_block_threads * 16) % ((int64_t)1 << sort_start_bit)) == 0)
{
int64_t n_permute_blocks = (n + storage->cuda_block_threads * 16 - 1) / (storage->cuda_block_threads * 16);
hipLaunchKernelGGL(( libcubwt_permute_block_values_uint32_kernel), dim3((uint32_t)n_permute_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_index.Current(), db_value.Current(), device_dst, (uint32_t)n);
}
else
{
int64_t n_scatter_blocks = (n + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
hipLaunchKernelGGL(( libcubwt_scatter_values_uint32_kernel), dim3((uint32_t)n_scatter_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_index.Current(), db_value.Current(), device_dst, (uint32_t)n);
}
}
return status;
}
static hipError_t libcubwt_scatter_values_uint64(LIBCUBWT_DEVICE_STORAGE * storage, cub::DoubleBuffer<uint32_t> & db_index, cub::DoubleBuffer<uint64_t> & db_value, int64_t m, int64_t n, int64_t k = 0)
{
hipError_t status = hipSuccess;
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 3) ? (sort_end_bit - storage->device_L2_cache_bits + 3 + 7) & (-8) : 0;
int32_t sort_start_bit = ::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == hipSuccess)
{
int64_t n_scatter_blocks = (m + storage->cuda_block_threads * 2 - 1) / (storage->cuda_block_threads * 2);
hipLaunchKernelGGL(( libcubwt_scatter_values_uint64_kernel), dim3((uint32_t)n_scatter_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, db_index.Current(), db_value.Current(), db_value.Alternate() - k, (uint32_t)m);
db_index.selector ^= 1;
db_value.selector ^= 1;
}
return status;
}
template <bool extra_sentinel_bits>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_initialize_device_arrays_kernel(const uint8_t * RESTRICT device_T, uint32_t * RESTRICT device_SA, uint64_t * RESTRICT device_keys)
{
__shared__ __align__(32) uint4 prefixes[4 * CUDA_BLOCK_THREADS];
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 16;
if (threadIdx.x < (12 * CUDA_BLOCK_THREADS + 8 + 15) / 16) { prefixes[threadIdx.x] = __ldg((uint4 *)device_T); }
__syncthreads();
}
{
uint32_t * RESTRICT thread_cache = ((uint32_t *)prefixes) + threadIdx.x * 3;
uint4 * RESTRICT thread_prefixes = ((uint4 * )prefixes) + threadIdx.x * 4;
const uint32_t b0 = thread_cache[0];
const uint32_t b1 = thread_cache[1];
const uint32_t b2 = thread_cache[2];
const uint32_t b3 = thread_cache[3];
const uint32_t b4 = thread_cache[4];
__syncthreads();
thread_prefixes[0] = make_uint4
(
__byte_perm(b1, b2, 0x1234) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b0, b1, 0x1234),
__byte_perm(b1, b2, 0x2345) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b0, b1, 0x2345)
);
thread_prefixes[1] = make_uint4
(
__byte_perm(b2, b3, 0x0123) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x0123),
__byte_perm(b2, b3, 0x1234) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x1234)
);
thread_prefixes[2] = make_uint4
(
__byte_perm(b2, b3, 0x3456) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x3456),
__byte_perm(b3, b4, 0x0123) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x0123)
);
thread_prefixes[3] = make_uint4
(
__byte_perm(b3, b4, 0x2345) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x2345),
__byte_perm(b3, b4, 0x3456) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x3456)
);
__syncwarp();
}
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 8;
{
uint32_t thread_index = block_index + threadIdx.x * 4; device_SA += thread_index;
((uint4 *)device_SA)[0] = make_uint4(thread_index + 0, thread_index + 1, thread_index + 2, thread_index + 3);
thread_index += CUDA_BLOCK_THREADS * 4; device_SA += CUDA_BLOCK_THREADS * 4;
((uint4 *)device_SA)[0] = make_uint4(thread_index + 0, thread_index + 1, thread_index + 2, thread_index + 3);
}
{
device_keys += block_index;
uint4 * RESTRICT thread_prefixes = (uint4 *)prefixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
uint4 * RESTRICT thread_keys = (uint4 *)device_keys + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0];
}
}
}
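// Single-thread kernel: zero-terminates the padded text and overwrites the last eight
// 64-bit keys with the precomputed sentinel keys k0..k7.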
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, 1)
static void libcubwt_set_sentinel_values_kernel(uint8_t * RESTRICT device_T_end, uint64_t * RESTRICT device_keys_end, uint64_t k0, uint64_t k1, uint64_t k2, uint64_t k3, uint64_t k4, uint64_t k5, uint64_t k6, uint64_t k7)
{
device_T_end[0] = 0;
device_T_end[1] = 0;
device_T_end[2] = 0;
device_keys_end[-8] = k0;
device_keys_end[-7] = k1;
device_keys_end[-6] = k2;
device_keys_end[-5] = k3;
device_keys_end[-4] = k4;
device_keys_end[-3] = k5;
device_keys_end[-2] = k6;
device_keys_end[-1] = k7;
}
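// Copies the input text to the device, launches the key-initialization kernel, and patches
// the tail: up to eleven trailing text bytes are re-packed on the host into the sentinel
// keys k0..k7 (with sentinel bits in the low byte) so that suffixes running past the end of
// the input compare correctly. Also resets the unsorted segment/suffix counters.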
static hipError_t libcubwt_initialize_device_arrays(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, int64_t reduced_n, int64_t expanded_n, int64_t input_n)
{
hipError_t status = hipSuccess;
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->device_T, T, (size_t)input_n, hipMemcpyHostToDevice, storage->cuda_stream))) == hipSuccess)
{
int64_t n_initialize_blocks = 1 + (expanded_n / (storage->cuda_block_threads * 12));
bool extra_sentinel_bits = (expanded_n - input_n >= 2) || (T[input_n - 1] == 0);
if (extra_sentinel_bits)
{
hipLaunchKernelGGL(( libcubwt_initialize_device_arrays_kernel<true>), dim3((uint32_t)n_initialize_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, storage->device_T, storage->device_SA, storage->device_keys_temp_keys);
}
else
{
hipLaunchKernelGGL(( libcubwt_initialize_device_arrays_kernel<false>), dim3((uint32_t)n_initialize_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, storage->device_T, storage->device_SA, storage->device_keys_temp_keys);
}
{
uint64_t c0 = (expanded_n - 11 < input_n) ? T[expanded_n - 11] : (uint64_t)0;
uint64_t c1 = (expanded_n - 10 < input_n) ? T[expanded_n - 10] : (uint64_t)0;
uint64_t c2 = (expanded_n - 9 < input_n) ? T[expanded_n - 9] : (uint64_t)0;
uint64_t c3 = (expanded_n - 8 < input_n) ? T[expanded_n - 8] : (uint64_t)0;
uint64_t c4 = (expanded_n - 7 < input_n) ? T[expanded_n - 7] : (uint64_t)0;
uint64_t c5 = (expanded_n - 6 < input_n) ? T[expanded_n - 6] : (uint64_t)0;
uint64_t c6 = (expanded_n - 5 < input_n) ? T[expanded_n - 5] : (uint64_t)0;
uint64_t c7 = (expanded_n - 4 < input_n) ? T[expanded_n - 4] : (uint64_t)0;
uint64_t c8 = (expanded_n - 3 < input_n) ? T[expanded_n - 3] : (uint64_t)0;
uint64_t c9 = (expanded_n - 2 < input_n) ? T[expanded_n - 2] : (uint64_t)0;
uint64_t ca = (expanded_n - 1 < input_n) ? T[expanded_n - 1] : (uint64_t)0;
uint64_t k0 = (c0 << 56) | (c1 << 48) | (c2 << 40) | (c3 << 32) | (c4 << 24) | (c5 << 16) | (c6 << 8) | (c7 << 0) | (extra_sentinel_bits ? 7 : 1);
uint64_t k1 = (c1 << 56) | (c2 << 48) | (c3 << 40) | (c4 << 32) | (c5 << 24) | (c6 << 16) | (c7 << 8) | (c8 << 0) | (extra_sentinel_bits ? 7 : 1);
uint64_t k2 = (c3 << 56) | (c4 << 48) | (c5 << 40) | (c6 << 32) | (c7 << 24) | (c8 << 16) | (c9 << 8) | (ca << 0) | (extra_sentinel_bits ? 7 : 0);
uint64_t k3 = (c4 << 56) | (c5 << 48) | (c6 << 40) | (c7 << 32) | (c8 << 24) | (c9 << 16) | (ca << 8) | (extra_sentinel_bits ? 6 : 0);
uint64_t k4 = (c6 << 56) | (c7 << 48) | (c8 << 40) | (c9 << 32) | (ca << 24) | (extra_sentinel_bits ? 4 : 0);
uint64_t k5 = (c7 << 56) | (c8 << 48) | (c9 << 40) | (ca << 32) | (extra_sentinel_bits ? 3 : 0);
uint64_t k6 = (c9 << 56) | (ca << 48) | (extra_sentinel_bits ? 1 : 0);
uint64_t k7 = (ca << 56);
hipLaunchKernelGGL(( libcubwt_set_sentinel_values_kernel), dim3(1), dim3(1), 0, storage->cuda_stream, storage->device_T + input_n, storage->device_keys_temp_keys + reduced_n, k0, k1, k2, k3, k4, k5, k6, k7);
}
storage->num_unsorted_segments = (uint32_t)1;
storage->num_unsorted_suffixes = (uint32_t)reduced_n;
}
return status;
}
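// Initial radix sort of the whole suffix sample by its packed 64-bit prefix keys; the
// DoubleBuffer selectors indicate which buffers ended up holding the results, and the
// corresponding storage pointers are swapped so later stages keep using the same names.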
static hipError_t libcubwt_sort_suffixes_by_prefix(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n)
{
cub::DoubleBuffer<uint64_t> db_keys(storage->device_keys_temp_keys, storage->device_offsets_ISA);
cub::DoubleBuffer<uint32_t> db_SA(storage->device_SA, storage->device_temp_SA);
hipError_t status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_keys, db_SA,
(uint32_t)n,
0, 64,
storage->cuda_stream));
if (db_keys.selector)
{
std::swap(storage->device_keys_temp_keys, storage->device_offsets_ISA);
std::swap(storage->device_keys, storage->device_offsets);
std::swap(storage->device_temp_keys, storage->device_ISA);
}
if (db_SA.selector)
{
std::swap(storage->device_SA, storage->device_temp_SA);
}
return status;
}
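// Clears the descriptor arrays used by the chained-scan (decoupled look-back) kernels and,
// in the first block, seeds the guard descriptors in front of the arrays as well as padding
// entries past position n in SA / keys / heads, so later kernels can process fixed-size
// tiles without bounds checks.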
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_initialization_kernel(uint32_t * RESTRICT device_SA, uint64_t * RESTRICT device_keys, uint8_t * RESTRICT device_heads, uint4 * RESTRICT device_descriptors_large, uint2 * RESTRICT device_descriptors_small, uint32_t n)
{
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
device_descriptors_large += thread_index;
device_descriptors_small += thread_index;
device_descriptors_large[0] = make_uint4(0, 0, 0, 0);
device_descriptors_small[0] = make_uint2(0, 0);
if (blockIdx.x == 0)
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
device_descriptors_large[-CUDA_WARP_THREADS] = make_uint4((uint32_t)-1, 0, 0, 0);
device_descriptors_small[-CUDA_WARP_THREADS] = make_uint2((uint32_t)-1, 0);
}
{
uint64_t key = (threadIdx.x % 2 == 0) ? 0 : (uint64_t)-1;
device_SA += threadIdx.x; device_keys += threadIdx.x; device_heads += threadIdx.x;
if (threadIdx.x < 2)
{
device_keys [-2] = key;
device_heads[-2] = 1;
}
device_SA += n; device_keys += n; device_heads += n;
device_SA [0 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 0 * CUDA_BLOCK_THREADS;
device_SA [1 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 1 * CUDA_BLOCK_THREADS;
device_SA [2 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 2 * CUDA_BLOCK_THREADS;
device_SA [3 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 3 * CUDA_BLOCK_THREADS;
device_keys [0 * CUDA_BLOCK_THREADS] = key;
device_keys [1 * CUDA_BLOCK_THREADS] = key;
device_keys [2 * CUDA_BLOCK_THREADS] = key;
device_keys [3 * CUDA_BLOCK_THREADS] = key;
device_heads[0 * CUDA_BLOCK_THREADS] = 1;
device_heads[1 * CUDA_BLOCK_THREADS] = 1;
device_heads[2 * CUDA_BLOCK_THREADS] = 1;
device_heads[3 * CUDA_BLOCK_THREADS] = 1;
}
}
}
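// First ranking pass after the prefix sort. Adjacent 64-bit keys are compared to mark
// segment heads; every suffix receives the rank (position) of its segment head, and runs of
// equal keys longer than one are recorded as begin/end offsets for the segmented sort.
// Per-block partial results are combined across blocks with a decoupled look-back over
// device_descriptors (max for ranks, sum for segment counts). With scatter_ranks_directly
// the ranks go straight into ISA[SA[i]]; otherwise they are stored by position and
// scattered in a separate pass.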
template <bool scatter_ranks_directly>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_initiatory_kernel(
const uint32_t * RESTRICT device_SA,
const uint64_t * RESTRICT device_keys,
uint8_t * RESTRICT device_heads,
uint32_t * RESTRICT device_ISA,
uint32_t * RESTRICT device_offsets_begin,
uint32_t * RESTRICT device_offsets_end,
uint4 * RESTRICT device_descriptors
)
{
__shared__ __align__(32) uint2 warp_state[1 + CUDA_WARP_THREADS];
uint32_t thread_exclusive_suffix_rank;
uint32_t thread_suffix_rank[4];
uint32_t thread_exclusive_segment_index;
uint32_t thread_segment_index[4];
{
__shared__ __align__(32) ulonglong2 cache[1 + 2 * CUDA_BLOCK_THREADS];
{
device_keys += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 2;
if (threadIdx.x == 0) { cache[0] = __ldg((ulonglong2 *)(device_keys - 2)); }
cache[1 + threadIdx.x + 0 * CUDA_BLOCK_THREADS] = __ldg((ulonglong2 *)(device_keys + 0 * CUDA_BLOCK_THREADS));
cache[1 + threadIdx.x + 1 * CUDA_BLOCK_THREADS] = __ldg((ulonglong2 *)(device_keys + 2 * CUDA_BLOCK_THREADS));
}
__syncthreads();
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
const uint32_t thread_index = block_index + threadIdx.x * 4;
ulonglong2 key_a = cache[2 * threadIdx.x + 0];
ulonglong2 key_b = cache[2 * threadIdx.x + 1];
ulonglong2 key_c = cache[2 * threadIdx.x + 2];
uchar4 thread_new_heads = make_uchar4(
(key_a.y != key_b.x) ? (uint8_t)1 : (uint8_t)0,
(key_b.x != key_b.y) ? (uint8_t)1 : (uint8_t)0,
(key_b.y != key_c.x) ? (uint8_t)1 : (uint8_t)0,
(key_c.x != key_c.y) ? (uint8_t)1 : (uint8_t)0);
*(uchar4 *)(device_heads + thread_index) = thread_new_heads;
thread_suffix_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : 0;
thread_suffix_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_rank[0];
thread_suffix_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_rank[1];
thread_suffix_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_rank[2];
thread_segment_index[0] = ((thread_new_heads.x != 0) && (key_a.x == key_a.y));
thread_segment_index[1] = thread_segment_index[0] + ((thread_new_heads.y != 0) && (thread_new_heads.x == 0));
thread_segment_index[2] = thread_segment_index[1] + ((thread_new_heads.z != 0) && (thread_new_heads.y == 0));
thread_segment_index[3] = thread_segment_index[2] + ((thread_new_heads.w != 0) && (thread_new_heads.z == 0));
}
}
{
uint32_t thread_inclusive_suffix_rank;
uint32_t thread_inclusive_segment_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_rank[3] , thread_inclusive_suffix_rank , thread_exclusive_suffix_rank , (uint32_t)0, hipcub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_segment_index[3], thread_inclusive_segment_index, thread_exclusive_segment_index, (uint32_t)0, hipcub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state[threadIdx.x / CUDA_WARP_THREADS] = make_uint2(thread_inclusive_suffix_rank, thread_inclusive_segment_index);
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_rank = 0;
uint32_t block_exclusive_segment_index = 0;
uint32_t warp_inclusive_suffix_rank;
uint32_t warp_inclusive_segment_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint2 warp_inclusive_state = warp_state[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.x, warp_inclusive_suffix_rank , hipcub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.y, warp_inclusive_segment_index, hipcub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = 0;
const uint32_t descriptor_status_partial_aggregate_ready = 1;
const uint32_t descriptor_status_full_aggregate_ready = 4;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_partial_aggregate_ready, 0, warp_inclusive_suffix_rank, warp_inclusive_segment_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint4 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.z = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.z : 0;
block_descriptor.w = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.w : 0;
}
{
block_exclusive_suffix_rank = hipcub::Max()(block_exclusive_suffix_rank , libcubwt_warp_reduce_max(block_descriptor.z));
block_exclusive_segment_index = hipcub::Sum()(block_exclusive_segment_index, libcubwt_warp_reduce_sum(block_descriptor.w));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_rank = hipcub::Max()(warp_inclusive_suffix_rank , block_exclusive_suffix_rank );
warp_inclusive_segment_index = hipcub::Sum()(warp_inclusive_segment_index, block_exclusive_segment_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_full_aggregate_ready, 0, warp_inclusive_suffix_rank, warp_inclusive_segment_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state[0] = make_uint2(block_exclusive_suffix_rank, block_exclusive_segment_index);
}
warp_state[1 + threadIdx.x] = make_uint2(warp_inclusive_suffix_rank, warp_inclusive_segment_index);
}
}
__syncthreads();
}
{
uint2 warp_exclusive_state = warp_state[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_rank = hipcub::Max()(thread_exclusive_suffix_rank , warp_exclusive_state.x);
thread_exclusive_segment_index = hipcub::Sum()(thread_exclusive_segment_index, warp_exclusive_state.y);
thread_suffix_rank[0] = hipcub::Max()(thread_suffix_rank[0], thread_exclusive_suffix_rank);
thread_suffix_rank[1] = hipcub::Max()(thread_suffix_rank[1], thread_exclusive_suffix_rank);
thread_suffix_rank[2] = hipcub::Max()(thread_suffix_rank[2], thread_exclusive_suffix_rank);
thread_suffix_rank[3] = hipcub::Max()(thread_suffix_rank[3], thread_exclusive_suffix_rank);
thread_segment_index[0] = hipcub::Sum()(thread_segment_index[0], thread_exclusive_segment_index);
thread_segment_index[1] = hipcub::Sum()(thread_segment_index[1], thread_exclusive_segment_index);
thread_segment_index[2] = hipcub::Sum()(thread_segment_index[2], thread_exclusive_segment_index);
thread_segment_index[3] = hipcub::Sum()(thread_segment_index[3], thread_exclusive_segment_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
if (thread_exclusive_segment_index != thread_segment_index[0]) { device_offsets_begin[thread_segment_index[0]] = thread_exclusive_suffix_rank; device_offsets_end[thread_segment_index[0]] = thread_index + 0; }
if (thread_segment_index[0] != thread_segment_index[1]) { device_offsets_begin[thread_segment_index[1]] = thread_suffix_rank[0]; device_offsets_end[thread_segment_index[1]] = thread_index + 1; }
if (thread_segment_index[1] != thread_segment_index[2]) { device_offsets_begin[thread_segment_index[2]] = thread_suffix_rank[1]; device_offsets_end[thread_segment_index[2]] = thread_index + 2; }
if (thread_segment_index[2] != thread_segment_index[3]) { device_offsets_begin[thread_segment_index[3]] = thread_suffix_rank[2]; device_offsets_end[thread_segment_index[3]] = thread_index + 3; }
if (scatter_ranks_directly)
{
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
device_ISA[indexes.x] = thread_suffix_rank[0];
device_ISA[indexes.y] = thread_suffix_rank[1];
device_ISA[indexes.z] = thread_suffix_rank[2];
device_ISA[indexes.w] = thread_suffix_rank[3];
}
else
{
*(uint4 *)(device_ISA + thread_index) = make_uint4(thread_suffix_rank[0], thread_suffix_rank[1], thread_suffix_rank[2], thread_suffix_rank[3]);
}
}
}
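// Re-ranking pass for the later prefix-doubling iterations, where the keys are 32-bit
// ranks. Old and new head flags are both tracked so that only suffixes whose rank actually
// changed are written back (either directly into ISA, or compacted into temp_SA/temp_ISA
// for a later scatter). The alternate_block_descriptor_statuses template flag flips the
// meaning of the descriptor status codes, presumably so the descriptor array does not have
// to be cleared between passes.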
template <bool alternate_block_descriptor_statuses, bool scatter_ranks_directly>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_incremental_kernel(
const uint32_t * RESTRICT device_SA,
const uint32_t * RESTRICT device_keys,
uint8_t * RESTRICT device_heads,
uint32_t * RESTRICT device_out_SA,
uint32_t * RESTRICT device_out_ISA,
uint32_t * RESTRICT device_offsets_begin,
uint32_t * RESTRICT device_offsets_end,
uint4 * RESTRICT device_descriptors,
const uint4 * RESTRICT device_descriptors_copy
)
{
__shared__ __align__(32) uint4 warp_state1[1 + CUDA_WARP_THREADS];
__shared__ __align__(32) uint32_t warp_state2[1 + CUDA_WARP_THREADS];
uchar4 thread_old_heads;
uint32_t thread_exclusive_suffix_old_rank;
uchar4 thread_new_heads;
uint32_t thread_exclusive_suffix_new_rank;
uint32_t thread_exclusive_segment_index;
uint32_t thread_segment_index[4];
uint32_t thread_exclusive_suffix_index;
uint32_t thread_suffix_index[4];
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
const uint32_t thread_index = block_index + threadIdx.x * 4;
device_keys += thread_index; device_heads += thread_index;
uint2 key_a = __ldg((uint2 *)(device_keys - 2));
uint4 key_b = __ldg((uint4 *)(device_keys));
thread_old_heads = *(uchar4 *)(device_heads);
thread_new_heads = make_uchar4(
(key_a.y != key_b.x) ? (uint8_t)1 : (uint8_t)thread_old_heads.x,
(key_b.x != key_b.y) ? (uint8_t)1 : (uint8_t)thread_old_heads.y,
(key_b.y != key_b.z) ? (uint8_t)1 : (uint8_t)thread_old_heads.z,
(key_b.z != key_b.w) ? (uint8_t)1 : (uint8_t)thread_old_heads.w);
*(uchar4 *)(device_heads) = thread_new_heads;
thread_exclusive_suffix_old_rank = (thread_old_heads.x != 0) ? (thread_index + 0) : 0;
thread_exclusive_suffix_old_rank = (thread_old_heads.y != 0) ? (thread_index + 1) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_old_rank = (thread_old_heads.z != 0) ? (thread_index + 2) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_old_rank = (thread_old_heads.w != 0) ? (thread_index + 3) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.x != 0) ? (thread_index + 0) : 0;
thread_exclusive_suffix_new_rank = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_exclusive_suffix_new_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_exclusive_suffix_new_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_exclusive_suffix_new_rank;
thread_segment_index[0] = ((thread_new_heads.x != 0) && (key_a.x == key_a.y) && (device_heads[-1] == 0));
thread_segment_index[1] = thread_segment_index[0] + ((thread_new_heads.y != 0) && (thread_new_heads.x == 0));
thread_segment_index[2] = thread_segment_index[1] + ((thread_new_heads.z != 0) && (thread_new_heads.y == 0));
thread_segment_index[3] = thread_segment_index[2] + ((thread_new_heads.w != 0) && (thread_new_heads.z == 0));
}
{
uint32_t thread_inclusive_suffix_old_rank;
uint32_t thread_inclusive_suffix_new_rank;
uint32_t thread_inclusive_segment_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_BLOCK_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_exclusive_suffix_old_rank, thread_inclusive_suffix_old_rank, thread_exclusive_suffix_old_rank, (uint32_t)0, hipcub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_exclusive_suffix_new_rank, thread_inclusive_suffix_new_rank, thread_exclusive_suffix_new_rank, (uint32_t)0, hipcub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_segment_index[3] , thread_inclusive_segment_index , thread_exclusive_segment_index , (uint32_t)0, hipcub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state1[threadIdx.x / CUDA_WARP_THREADS] = make_uint4(0, thread_inclusive_suffix_old_rank, thread_inclusive_suffix_new_rank, thread_inclusive_segment_index);
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_new_rank = 0;
uint32_t block_exclusive_segment_index = 0;
uint32_t warp_inclusive_suffix_old_rank;
uint32_t warp_inclusive_suffix_new_rank;
uint32_t warp_inclusive_segment_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint4 warp_inclusive_state = warp_state1[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.y, warp_inclusive_suffix_old_rank, hipcub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.z, warp_inclusive_suffix_new_rank, hipcub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.w, warp_inclusive_segment_index , hipcub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 4 : 0;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 3 : 1;
const uint32_t descriptor_status_full_aggregate_ready = scatter_ranks_directly ? (alternate_block_descriptor_statuses ? 0 : 4) : 2;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_partial_aggregate_ready, 0, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint4 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.z = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.z : 0;
block_descriptor.w = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.w : 0;
}
{
block_exclusive_suffix_new_rank = hipcub::Max()(block_exclusive_suffix_new_rank , libcubwt_warp_reduce_max(block_descriptor.z));
block_exclusive_segment_index = hipcub::Sum()(block_exclusive_segment_index , libcubwt_warp_reduce_sum(block_descriptor.w));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_new_rank = hipcub::Max()(warp_inclusive_suffix_new_rank, block_exclusive_suffix_new_rank);
warp_inclusive_segment_index = hipcub::Sum()(warp_inclusive_segment_index , block_exclusive_segment_index );
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_full_aggregate_ready, 0, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index));
}
}
{
uint32_t block_exclusive_suffix_old_rank = __ldg((uint32_t *)(device_descriptors_copy + blockIdx.x - 1) + 2);
warp_inclusive_suffix_old_rank = hipcub::Max()(warp_inclusive_suffix_old_rank, block_exclusive_suffix_old_rank);
if (threadIdx.x == 0)
{
warp_state1[0] = make_uint4(0, block_exclusive_suffix_old_rank, block_exclusive_suffix_new_rank, block_exclusive_segment_index);
}
warp_state1[1 + threadIdx.x] = make_uint4(0, warp_inclusive_suffix_old_rank, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index);
}
}
__syncthreads();
}
{
uint32_t thread_suffix_old_rank[4];
uint32_t thread_suffix_new_rank[4];
uint4 warp_exclusive_state = warp_state1[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_old_rank = hipcub::Max()(thread_exclusive_suffix_old_rank, warp_exclusive_state.y);
thread_exclusive_suffix_new_rank = hipcub::Max()(thread_exclusive_suffix_new_rank, warp_exclusive_state.z);
thread_exclusive_segment_index = hipcub::Sum()(thread_exclusive_segment_index , warp_exclusive_state.w);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
thread_suffix_old_rank[0] = (thread_old_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_old_rank;
thread_suffix_old_rank[1] = (thread_old_heads.y != 0) ? (thread_index + 1) : thread_suffix_old_rank[0];
thread_suffix_old_rank[2] = (thread_old_heads.z != 0) ? (thread_index + 2) : thread_suffix_old_rank[1];
thread_suffix_old_rank[3] = (thread_old_heads.w != 0) ? (thread_index + 3) : thread_suffix_old_rank[2];
thread_suffix_new_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_new_rank;
thread_suffix_new_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_new_rank[0];
thread_suffix_new_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_new_rank[1];
thread_suffix_new_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_new_rank[2];
thread_segment_index[0] = hipcub::Sum()(thread_segment_index[0], thread_exclusive_segment_index);
thread_segment_index[1] = hipcub::Sum()(thread_segment_index[1], thread_exclusive_segment_index);
thread_segment_index[2] = hipcub::Sum()(thread_segment_index[2], thread_exclusive_segment_index);
thread_segment_index[3] = hipcub::Sum()(thread_segment_index[3], thread_exclusive_segment_index);
if (thread_exclusive_segment_index != thread_segment_index[0]) { device_offsets_begin[thread_segment_index[0]] = thread_exclusive_suffix_new_rank; device_offsets_end[thread_segment_index[0]] = thread_index + 0; }
if (thread_segment_index[0] != thread_segment_index[1]) { device_offsets_begin[thread_segment_index[1]] = thread_suffix_new_rank[0]; device_offsets_end[thread_segment_index[1]] = thread_index + 1; }
if (thread_segment_index[1] != thread_segment_index[2]) { device_offsets_begin[thread_segment_index[2]] = thread_suffix_new_rank[1]; device_offsets_end[thread_segment_index[2]] = thread_index + 2; }
if (thread_segment_index[2] != thread_segment_index[3]) { device_offsets_begin[thread_segment_index[3]] = thread_suffix_new_rank[2]; device_offsets_end[thread_segment_index[3]] = thread_index + 3; }
if (scatter_ranks_directly)
{
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
if (thread_suffix_old_rank[0] != thread_suffix_new_rank[0]) { device_out_ISA[indexes.x] = thread_suffix_new_rank[0]; }
if (thread_suffix_old_rank[1] != thread_suffix_new_rank[1]) { device_out_ISA[indexes.y] = thread_suffix_new_rank[1]; }
if (thread_suffix_old_rank[2] != thread_suffix_new_rank[2]) { device_out_ISA[indexes.z] = thread_suffix_new_rank[2]; }
if (thread_suffix_old_rank[3] != thread_suffix_new_rank[3]) { device_out_ISA[indexes.w] = thread_suffix_new_rank[3]; }
}
else
{
thread_suffix_index[0] = (thread_suffix_old_rank[0] != thread_suffix_new_rank[0]);
thread_suffix_index[1] = thread_suffix_index[0] + (thread_suffix_old_rank[1] != thread_suffix_new_rank[1]);
thread_suffix_index[2] = thread_suffix_index[1] + (thread_suffix_old_rank[2] != thread_suffix_new_rank[2]);
thread_suffix_index[3] = thread_suffix_index[2] + (thread_suffix_old_rank[3] != thread_suffix_new_rank[3]);
}
}
if (!scatter_ranks_directly)
{
{
uint32_t thread_inclusive_suffix_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_index[3], thread_inclusive_suffix_index, thread_exclusive_suffix_index, (uint32_t)0, hipcub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state2[threadIdx.x / CUDA_WARP_THREADS] = thread_inclusive_suffix_index;
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_index = 0;
uint32_t warp_inclusive_suffix_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint32_t warp_inclusive_state = warp_state2[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state, warp_inclusive_suffix_index, hipcub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 2 : 2;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 1 : 3;
const uint32_t descriptor_status_full_aggregate_ready = alternate_block_descriptor_statuses ? 0 : 4;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>((uint2 *)(device_descriptors + blockIdx.x), make_uint2(descriptor_status_partial_aggregate_ready, warp_inclusive_suffix_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint2 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>((uint2 *)descriptors_lookback);
} while (__any_sync((uint32_t)-1, alternate_block_descriptor_statuses
? ((int32_t )block_descriptor.x >= (int32_t )descriptor_status_aggregate_not_ready)
: ((uint32_t)block_descriptor.x <= (uint32_t)descriptor_status_aggregate_not_ready)));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.y = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.y : 0;
}
{
block_exclusive_suffix_index = hipcub::Sum()(block_exclusive_suffix_index, libcubwt_warp_reduce_sum(block_descriptor.y));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_index = hipcub::Sum()(warp_inclusive_suffix_index, block_exclusive_suffix_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>((uint2 *)(device_descriptors + blockIdx.x), make_uint2(descriptor_status_full_aggregate_ready, warp_inclusive_suffix_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state2[0] = block_exclusive_suffix_index;
}
warp_state2[1 + threadIdx.x] = warp_inclusive_suffix_index;
}
}
__syncthreads();
}
{
if (thread_suffix_index[3] > 0)
{
uint32_t thread_suffix_new_rank[4];
uint32_t warp_exclusive_state = warp_state2[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_index = hipcub::Sum()(thread_exclusive_suffix_index, warp_exclusive_state);
thread_suffix_index[0] = hipcub::Sum()(thread_suffix_index[0], thread_exclusive_suffix_index);
thread_suffix_index[1] = hipcub::Sum()(thread_suffix_index[1], thread_exclusive_suffix_index);
thread_suffix_index[2] = hipcub::Sum()(thread_suffix_index[2], thread_exclusive_suffix_index);
thread_suffix_index[3] = hipcub::Sum()(thread_suffix_index[3], thread_exclusive_suffix_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
thread_suffix_new_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_new_rank;
thread_suffix_new_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_new_rank[0];
thread_suffix_new_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_new_rank[1];
thread_suffix_new_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_new_rank[2];
if (thread_exclusive_suffix_index != thread_suffix_index[0]) { device_out_SA[thread_suffix_index[0]] = indexes.x; device_out_ISA[thread_suffix_index[0]] = thread_suffix_new_rank[0]; }
if (thread_suffix_index[0] != thread_suffix_index[1]) { device_out_SA[thread_suffix_index[1]] = indexes.y; device_out_ISA[thread_suffix_index[1]] = thread_suffix_new_rank[1]; }
if (thread_suffix_index[1] != thread_suffix_index[2]) { device_out_SA[thread_suffix_index[2]] = indexes.z; device_out_ISA[thread_suffix_index[2]] = thread_suffix_new_rank[2]; }
if (thread_suffix_index[2] != thread_suffix_index[3]) { device_out_SA[thread_suffix_index[3]] = indexes.w; device_out_ISA[thread_suffix_index[3]] = thread_suffix_new_rank[3]; }
}
}
}
}
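// Host-side driver for one ranking pass: iteration 0 uses the initiatory kernel, later
// iterations copy the previous descriptors aside (so the old rank aggregates stay readable)
// and run the incremental kernel. Ranks are scattered directly while ISA fits the L2 cache
// budget, otherwise via a separate permute/scatter step. The last block descriptor is read
// back to obtain the number of remaining unsorted segments (and, on the indirect path, the
// number of updated suffixes).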
static hipError_t libcubwt_rank_and_segment_suffixes(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n, int64_t iteration)
{
hipError_t status = hipSuccess;
int64_t n_segmentation_blocks = 1 + (n / (storage->cuda_block_threads * 4));
int64_t n_initialization_blocks = (n_segmentation_blocks + storage->cuda_block_threads - 1) / storage->cuda_block_threads;
bool scatter_ranks_directly = (n <= ((int64_t)1 << (storage->device_L2_cache_bits - 3)));
if (iteration == 0)
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_initialization_kernel), dim3((uint32_t)n_initialization_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_descriptors_large,
storage->device_descriptors_small,
(uint32_t)n);
if (scatter_ranks_directly)
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_initiatory_kernel<true>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large);
}
else
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_initiatory_kernel<false>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
NULL,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_temp_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large);
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_large[n_segmentation_blocks - 1], sizeof(uint4), hipMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamSynchronize(storage->cuda_stream), status);
if (status == hipSuccess)
{
storage->num_unsorted_segments = ((uint4 *)storage->host_pinned_storage)->w;
if (!scatter_ranks_directly)
{
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->device_temp_SA, storage->device_SA, n * sizeof(uint32_t), hipMemcpyDeviceToDevice, storage->cuda_stream))) == hipSuccess)
{
status = libcubwt_permute_values_uint32(storage, storage->device_temp_SA, storage->device_temp_ISA, storage->device_ISA, n, storage->device_keys, storage->device_temp_keys);
}
}
}
}
else
{
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->device_descriptors_copy - 1, storage->device_descriptors_large - 1, n_segmentation_blocks * sizeof(uint4), hipMemcpyDeviceToDevice, storage->cuda_stream))) == hipSuccess)
{
if (scatter_ranks_directly)
{
if ((iteration % 2) == 0)
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_incremental_kernel<false, true>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys,
storage->device_heads,
NULL, storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
else
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_incremental_kernel<true, true>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys,
storage->device_heads,
NULL, storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
}
else
{
if ((iteration % 2) == 0)
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_incremental_kernel<false, false>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys,
storage->device_heads,
storage->device_temp_SA - 1, storage->device_temp_ISA - 1,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
else
{
hipLaunchKernelGGL(( libcubwt_rank_and_segment_suffixes_incremental_kernel<true, false>), dim3((uint32_t)n_segmentation_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_SA,
storage->device_keys,
storage->device_heads,
storage->device_temp_SA - 1, storage->device_temp_ISA - 1,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_large[n_segmentation_blocks - 1], sizeof(uint4), hipMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamSynchronize(storage->cuda_stream), status);
if (status == hipSuccess)
{
storage->num_unsorted_segments = ((uint4 *)storage->host_pinned_storage)->w;
if (!scatter_ranks_directly)
{
uint32_t num_updated_suffixes = ((uint4 *)storage->host_pinned_storage)->y;
if (num_updated_suffixes > 0)
{
status = libcubwt_scatter_values_uint32(storage, storage->device_temp_SA, storage->device_temp_ISA, storage->device_ISA, num_updated_suffixes, n, storage->device_keys, storage->device_temp_keys);
}
}
}
}
}
return status;
}
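// Compacts the suffixes that still share a rank with a neighbour (their head flags sum to
// less than 2) into a dense list of (position, SA value) pairs, using warp scans plus a
// decoupled look-back across blocks to compute the global output offsets.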
template <bool alternate_block_descriptor_statuses>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_gather_unsorted_suffixes_kernel(
const uint8_t * RESTRICT device_heads,
const uint32_t * RESTRICT device_SA,
uint32_t * RESTRICT device_out_keys,
uint32_t * RESTRICT device_out_SA,
uint2 * RESTRICT device_descriptors)
{
__shared__ __align__(32) uint32_t warp_state[1 + CUDA_WARP_THREADS];
uint32_t thread_exclusive_suffix_index;
uint32_t thread_suffix_index[4];
{
device_heads += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uchar4 current_heads = __ldg((uchar4 *)(device_heads));
const uint8_t next_head = current_heads.w > 0 ? __ldg(device_heads + 4) : 0;
thread_suffix_index[0] = (current_heads.x + current_heads.y < 2);
thread_suffix_index[1] = thread_suffix_index[0] + (current_heads.y + current_heads.z < 2);
thread_suffix_index[2] = thread_suffix_index[1] + (current_heads.z + current_heads.w < 2);
thread_suffix_index[3] = thread_suffix_index[2] + (current_heads.w + next_head < 2);
}
{
uint32_t thread_inclusive_suffix_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_index[3], thread_inclusive_suffix_index, thread_exclusive_suffix_index, (uint32_t)0, hipcub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state[threadIdx.x / CUDA_WARP_THREADS] = thread_inclusive_suffix_index;
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_index = 0;
uint32_t warp_inclusive_suffix_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint32_t warp_inclusive_state = warp_state[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state, warp_inclusive_suffix_index, hipcub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 2 : 0;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 1 : 1;
const uint32_t descriptor_status_full_aggregate_ready = alternate_block_descriptor_statuses ? 0 : 2;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint2(descriptor_status_partial_aggregate_ready, warp_inclusive_suffix_index));
}
{
uint2 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint2 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.y = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.y : 0;
}
{
block_exclusive_suffix_index = hipcub::Sum()(block_exclusive_suffix_index, libcubwt_warp_reduce_sum(block_descriptor.y));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_index = hipcub::Sum()(warp_inclusive_suffix_index, block_exclusive_suffix_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint2(descriptor_status_full_aggregate_ready, warp_inclusive_suffix_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state[0] = block_exclusive_suffix_index;
}
warp_state[1 + threadIdx.x] = warp_inclusive_suffix_index;
}
}
__syncthreads();
}
{
if (thread_suffix_index[3] > 0)
{
uint32_t warp_exclusive_state = warp_state[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_index = hipcub::Sum()(thread_exclusive_suffix_index, warp_exclusive_state);
thread_suffix_index[0] = hipcub::Sum()(thread_suffix_index[0], thread_exclusive_suffix_index);
thread_suffix_index[1] = hipcub::Sum()(thread_suffix_index[1], thread_exclusive_suffix_index);
thread_suffix_index[2] = hipcub::Sum()(thread_suffix_index[2], thread_exclusive_suffix_index);
thread_suffix_index[3] = hipcub::Sum()(thread_suffix_index[3], thread_exclusive_suffix_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
if (thread_exclusive_suffix_index != thread_suffix_index[0]) { device_out_keys[thread_suffix_index[0]] = thread_index + 0; device_out_SA[thread_suffix_index[0]] = indexes.x; }
if (thread_suffix_index[0] != thread_suffix_index[1]) { device_out_keys[thread_suffix_index[1]] = thread_index + 1; device_out_SA[thread_suffix_index[1]] = indexes.y; }
if (thread_suffix_index[1] != thread_suffix_index[2]) { device_out_keys[thread_suffix_index[2]] = thread_index + 2; device_out_SA[thread_suffix_index[2]] = indexes.z; }
if (thread_suffix_index[2] != thread_suffix_index[3]) { device_out_keys[thread_suffix_index[3]] = thread_index + 3; device_out_SA[thread_suffix_index[3]] = indexes.w; }
}
}
}
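// Refreshes the 32-bit sorting key of every still-unsorted suffix with the rank of the
// suffix `depth` positions ahead (device_ISA is passed pre-offset by the caller);
// already-sorted suffixes receive distinct dummy keys.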
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_update_suffix_sorting_keys_kernel(const uint8_t * RESTRICT device_heads, const uint32_t * RESTRICT device_SA, const uint32_t * RESTRICT device_ISA, uint32_t * RESTRICT device_keys)
{
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
device_heads += thread_index;
const uchar4 current_heads = __ldg((uchar4 *)(device_heads));
const uint8_t next_head = current_heads.w > 0 ? __ldg(device_heads + 4) : 0;
if (current_heads.x + current_heads.y + current_heads.z + current_heads.w + next_head < 5)
{
device_SA += thread_index; device_keys += thread_index;
const uint4 current_SA = __ldg((uint4 *)(device_SA));
((uint4 *)device_keys)[0] = make_uint4(
(current_heads.x + current_heads.y < 2) ? __ldg(device_ISA + current_SA.x) : (uint32_t)-1,
(current_heads.y + current_heads.z < 2) ? __ldg(device_ISA + current_SA.y) : (uint32_t)-2,
(current_heads.z + current_heads.w < 2) ? __ldg(device_ISA + current_SA.z) : (uint32_t)-3,
(current_heads.w + next_head < 2) ? __ldg(device_ISA + current_SA.w) : (uint32_t)-4);
}
}
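// Host-side driver for the key refresh. Keys are updated in place when the input is small
// (or so large that the gather detour presumably stops paying off) or when only a small
// fraction of suffixes remains unsorted; otherwise the unsorted suffixes are compacted
// first, their new ranks are fetched through a cache-aware gather/scatter, and the keys are
// written back.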
static hipError_t libcubwt_update_suffix_sorting_keys(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n, int64_t iteration, int64_t depth)
{
hipError_t status = hipSuccess;
int64_t n_ranking_blocks = (n + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
bool gather_keys_directly = (n <= ((int64_t)1 << (storage->device_L2_cache_bits - 2))) || (n > ((int64_t)1 << (storage->device_L2_cache_bits - 2 + 8)));
if (gather_keys_directly || (storage->num_unsorted_suffixes <= (n / 4)))
{
hipLaunchKernelGGL(( libcubwt_update_suffix_sorting_keys_kernel), dim3((uint32_t)n_ranking_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream, storage->device_heads, storage->device_SA, storage->device_ISA + depth, storage->device_keys);
}
else
{
if ((iteration % 2) == 0)
{
hipLaunchKernelGGL(( libcubwt_gather_unsorted_suffixes_kernel<false>), dim3((uint32_t)n_ranking_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_heads,
storage->device_SA,
storage->device_temp_keys - 1, storage->device_temp_SA - 1,
storage->device_descriptors_small);
}
else
{
hipLaunchKernelGGL(( libcubwt_gather_unsorted_suffixes_kernel<true>), dim3((uint32_t)n_ranking_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_heads,
storage->device_SA,
storage->device_temp_keys - 1, storage->device_temp_SA - 1,
storage->device_descriptors_small);
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_small[n_ranking_blocks - 1], sizeof(uint2), hipMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamSynchronize(storage->cuda_stream), status);
if (status == hipSuccess)
{
storage->num_unsorted_suffixes = ((uint2 *)storage->host_pinned_storage)->y;
if (storage->num_unsorted_suffixes > 0)
{
status = libcubwt_gather_scatter_values_uint32(storage, storage->device_temp_SA, storage->device_ISA + depth, storage->device_temp_keys, storage->device_keys, storage->num_unsorted_suffixes, n, storage->device_temp_ISA, storage->device_keys);
}
}
}
return status;
}
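// Segmented sort of the remaining unsorted suffixes: each segment of equal-ranked suffixes
// (bounded by the begin/end offsets collected earlier) is sorted by the refreshed keys, and
// the DoubleBuffer selectors are folded back into the storage pointers.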
static hipError_t libcubwt_sort_segmented_suffixes_by_rank(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n)
{
cub::DoubleBuffer<uint32_t> d_keys(storage->device_keys, storage->device_temp_keys);
cub::DoubleBuffer<uint32_t> d_values(storage->device_SA, storage->device_temp_SA);
hipError_t status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceSegmentedSort::SortPairs(
storage->device_ssort_temp_storage, storage->device_ssort_temp_storage_size,
d_keys, d_values,
(int)storage->num_unsorted_suffixes, (int)storage->num_unsorted_segments,
storage->device_offsets, storage->device_offsets + (n / 2),
storage->cuda_stream));
if (d_keys.selector) { std::swap(storage->device_keys, storage->device_temp_keys); }
if (d_values.selector) { std::swap(storage->device_SA, storage->device_temp_SA); }
return status;
}
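// Packs the fully ranked mod-1/mod-2 suffixes into 64-bit records for the final merge:
// each record pairs the suffix rank (the sign bit apparently marking every second, mod-2,
// position) with a word holding the next two text bytes, the preceding byte (the eventual
// BWT symbol) and, optionally, an auxiliary-index tag in its top byte.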
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_prepare_mod12_suffixes_kernel(const uint8_t * RESTRICT device_T, const uint32_t * RESTRICT device_ISA, const uint64_t * RESTRICT device_suffixes, const uint32_t rm, const uint32_t rs)
{
__shared__ union
{
struct
{
__align__(32) uint32_t bytes[4 * CUDA_BLOCK_THREADS];
__align__(32) uint4 ranks[3 * CUDA_BLOCK_THREADS];
} stage1;
struct
{
__align__(32) uint4 suffixes[4 * CUDA_BLOCK_THREADS];
} stage2;
} shared_storage;
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 16;
device_ISA += blockIdx.x * CUDA_BLOCK_THREADS * 8 + threadIdx.x * 4;
uint4 * RESTRICT thread_bytes = (uint4 *)shared_storage.stage1.bytes + threadIdx.x;
uint4 * RESTRICT thread_ranks = (uint4 *)shared_storage.stage1.ranks + threadIdx.x;
if (threadIdx.x < (12 * CUDA_BLOCK_THREADS + 4 + 15) / 16) { thread_bytes[0] = __ldg((uint4 *)device_T); }
thread_ranks[0] = __ldg((uint4 *)device_ISA); thread_ranks += CUDA_BLOCK_THREADS; device_ISA += CUDA_BLOCK_THREADS * 4;
thread_ranks[0] = __ldg((uint4 *)device_ISA); thread_ranks += CUDA_BLOCK_THREADS; device_ISA += CUDA_BLOCK_THREADS * 4;
if (threadIdx.x == 0) { thread_ranks[0] = __ldg((uint4 *)device_ISA); }
}
{
__syncthreads();
uint32_t bytes0 = shared_storage.stage1.bytes[threadIdx.x * 3 + 0];
uint32_t bytes1 = shared_storage.stage1.bytes[threadIdx.x * 3 + 1];
uint32_t bytes2 = shared_storage.stage1.bytes[threadIdx.x * 3 + 2];
uint32_t bytes3 = shared_storage.stage1.bytes[threadIdx.x * 3 + 3];
uint4 ranks0 = shared_storage.stage1.ranks[threadIdx.x * 2 + 0];
uint4 ranks1 = shared_storage.stage1.ranks[threadIdx.x * 2 + 1];
uint4 ranks2 = shared_storage.stage1.ranks[threadIdx.x * 2 + 2];
__syncthreads();
uint32_t v4 = 0, v8 = 0;
if (process_auxiliary_indexes)
{
const uint32_t i4 = blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 12 + 4 + rm + 1;
const uint32_t i8 = blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 12 + 8 + rm + 1;
if ((i4 & rm) == 0) { v4 = (i4 >> rs) << 24; }
if ((i8 & rm) == 0) { v8 = (i8 >> rs) << 24; }
}
shared_storage.stage2.suffixes[threadIdx.x * 4 + 0] = make_uint4
(
ranks0.y, __byte_perm(bytes0, 0, 0x4021),
ranks0.z | (uint32_t)INT32_MIN, __byte_perm(bytes0, 0, 0x4132)
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 1] = make_uint4
(
ranks0.w, (__byte_perm(bytes0, bytes1, 0x0354) & 0xffffffu) | v4,
ranks1.x | (uint32_t)INT32_MIN, __byte_perm(bytes1, 0, 0x4021)
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 2] = make_uint4
(
ranks1.y, __byte_perm(bytes1, bytes2, 0x0243) & 0xffffffu,
ranks1.z | (uint32_t)INT32_MIN, (__byte_perm(bytes1, bytes2, 0x0354) & 0xffffffu) | v8
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 3] = make_uint4
(
ranks1.w, __byte_perm(bytes2, 0, 0x4132),
ranks2.x | (uint32_t)INT32_MIN, __byte_perm(bytes2, bytes3, 0x0243) & 0xffffffu
);
__syncwarp();
}
{
device_suffixes += blockIdx.x * CUDA_BLOCK_THREADS * 8;
uint4 * RESTRICT thread_src = shared_storage.stage2.suffixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
uint4 * RESTRICT thread_dst = (uint4 *)device_suffixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0];
}
}
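// Packs the mod-0 suffixes for the merge: device_suffixes_lh pairs a sampled rank with the
// suffix's leading text bytes (plus the auxiliary-index tag, or a marker on the first
// suffix apparently used to recover the primary index), while device_suffixes_hh carries
// the flagged second rank used when the opposing suffix in the merge is a mod-2 one.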
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_prepare_mod0_suffixes_kernel(const uint8_t * RESTRICT device_T, const uint32_t * RESTRICT device_ISA, const uint64_t * RESTRICT device_suffixes_lh, const uint32_t * RESTRICT device_suffixes_hh, const uint32_t rm, const uint32_t rs)
{
__shared__ __align__(32) uint16_t bytes[3 * CUDA_BLOCK_THREADS + 8];
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 6 + threadIdx.x * 16;
uint4 * RESTRICT thread_bytes = (uint4 *)bytes + threadIdx.x;
if (threadIdx.x <= (6 * CUDA_BLOCK_THREADS) / 16) { thread_bytes[0] = __ldg((uint4 *)(device_T - 16)); }
}
{
device_ISA += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
device_suffixes_lh += blockIdx.x * CUDA_BLOCK_THREADS * 2 + threadIdx.x * 2;
device_suffixes_hh += blockIdx.x * CUDA_BLOCK_THREADS * 2 + threadIdx.x * 2;
__syncthreads();
uint32_t bytes0 = bytes[threadIdx.x * 3 + 7 ];
uint32_t bytes1 = bytes[threadIdx.x * 3 + 8 ];
uint32_t bytes2 = bytes[threadIdx.x * 3 + 9 ];
uint32_t bytes3 = bytes[threadIdx.x * 3 + 10];
uint4 ranks = __ldg((uint4 *)(device_ISA));
uint32_t v0 = 0;
if (process_auxiliary_indexes)
{
const uint32_t i0 = blockIdx.x * CUDA_BLOCK_THREADS * 6 + threadIdx.x * 6 + 0 + rm + 1;
if ((i0 & rm) == 0) { v0 = (i0 >> rs) << 24; }
}
else if ((blockIdx.x | threadIdx.x) == 0)
{
v0 = 1u << 24;
}
*(uint4 *)(device_suffixes_lh) = make_uint4
(
ranks.x, __byte_perm(bytes0, bytes1, 0x3154) | v0,
ranks.z, __byte_perm(bytes2, bytes3, 0x3041)
);
*(uint2 *)(device_suffixes_hh) = make_uint2(ranks.y | (uint32_t)INT32_MIN, ranks.w | (uint32_t)INT32_MIN);
}
}
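// Appends sentinel records with near-maximal keys after both suffix lists so the merge
// kernels can read fixed-size tiles without bounds checks.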
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, 1)
static void libcubwt_set_sentinel_suffixes_kernel(uint64_t * RESTRICT device_mod0l_suffixes_end, uint32_t * RESTRICT device_mod0h_suffixes_end, uint64_t * RESTRICT device_mod12_suffixes_end)
{
uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
device_mod0l_suffixes_end += thread_index;
device_mod0h_suffixes_end += thread_index;
device_mod12_suffixes_end += thread_index;
*(uint2 *)(device_mod0l_suffixes_end) = make_uint2(0x7fffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 0, 0x00ffffffu);
*(uint32_t *)(device_mod0h_suffixes_end) = (uint32_t)(0xffffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 0 );
*(uint2 *)(device_mod12_suffixes_end) = make_uint2(0x7fffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 1, 0x00ffffffu);
}
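// Merge comparator: compares the packed text bytes first, then breaks ties with the
// appropriate sampled rank (the mod-0 suffix supplies its flagged second rank when the
// mod-12 suffix is a flagged mod-2 one); returns true when the mod-0 suffix sorts first.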
__device__ __forceinline__
bool libcubwt_compare_suffixes_kernel(const uint2 mod0l_suffix, const uint32_t mod0h_suffix, const uint2 mod12_suffix)
{
uint32_t difference = __byte_perm(mod0l_suffix.y, 0, 0x4401) - __byte_perm(mod12_suffix.y, 0, 0x4401);
if (difference == 0) { difference = (((int32_t)mod12_suffix.x < 0) ? mod0h_suffix : mod0l_suffix.x) - mod12_suffix.x; }
return (int32_t)difference <= 0;
}
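// Merge-path partitioning: one thread per merge block binary-searches its diagonal of the
// (mod-0, mod-12) merge so that every block of the merge kernel consumes exactly
// CUDA_BLOCK_THREADS * 5 output positions.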
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_suffixes_merge_initialization_kernel(
const uint64_t * RESTRICT device_mod0l_suffixes,
const uint32_t * RESTRICT device_mod0h_suffixes,
const uint32_t num_mod0_suffixes,
const uint64_t * RESTRICT device_mod12_suffixes,
const uint32_t num_mod12_suffixes,
uint32_t * RESTRICT device_suffixes_merge_path,
uint32_t num_merging_blocks)
{
uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
if (thread_index <= num_merging_blocks)
{
uint32_t diagonal = thread_index * CUDA_BLOCK_THREADS * 5;
uint32_t begin = (diagonal > num_mod12_suffixes) ? (diagonal - num_mod12_suffixes) : 0;
uint32_t end = (diagonal > num_mod0_suffixes ) ? (num_mod0_suffixes ) : diagonal;
while (begin < end)
{
uint32_t pivot = begin + ((end - begin) >> 1);
bool predicate = libcubwt_compare_suffixes_kernel(
__ldg((uint2 *)(device_mod0l_suffixes + pivot)),
__ldg((uint32_t *)(device_mod0h_suffixes + pivot)),
__ldg((uint2 *)(device_mod12_suffixes + diagonal - pivot - 1)));
begin = predicate ? (pivot + 1) : begin;
end = predicate ? (end ) : pivot;
}
__syncwarp();
device_suffixes_merge_path[thread_index] = begin;
}
}
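// Final merge: each block loads its slice of both suffix lists into shared memory, runs a
// per-thread merge-path search, merges five suffixes per thread, emits the BWT symbol from
// each packed record, and records the primary/auxiliary indexes for records carrying a
// non-zero tag in the top byte.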
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_merge_suffixes_kernel(
const uint64_t * RESTRICT device_mod0l_suffixes,
const uint32_t * RESTRICT device_mod0h_suffixes,
const uint64_t * RESTRICT device_mod12_suffixes,
const uint32_t * RESTRICT device_suffixes_merge_path,
uint32_t * RESTRICT device_auxiliary_indexes,
uint8_t * RESTRICT device_L)
{
__shared__ union
{
struct
{
__align__(32) uint2 suffixes_l[CUDA_BLOCK_THREADS * 5 + 12];
__align__(32) uint32_t suffixes_h[CUDA_BLOCK_THREADS * 5 + 12];
} stage1;
struct
{
__align__(32) uint8_t bwt[CUDA_BLOCK_THREADS * 5];
} stage2;
} shared_storage;
uint32_t num_mod0_suffixes;
uint32_t num_mod12_suffixes;
{
const uint32_t block_mod0_path_begin = (device_suffixes_merge_path + blockIdx.x)[0];
const uint32_t block_mod0_path_end = (device_suffixes_merge_path + blockIdx.x)[1];
num_mod0_suffixes = block_mod0_path_end - block_mod0_path_begin + 6;
num_mod12_suffixes = CUDA_BLOCK_THREADS * 5 + 12 - num_mod0_suffixes;
device_mod0l_suffixes += block_mod0_path_begin;
device_mod0h_suffixes += block_mod0_path_begin;
device_mod12_suffixes += (blockIdx.x * CUDA_BLOCK_THREADS * 5 - block_mod0_path_begin);
device_mod12_suffixes -= num_mod0_suffixes;
#pragma unroll
for (uint32_t thread_index = threadIdx.x; thread_index < CUDA_BLOCK_THREADS * 5 + 12; thread_index += CUDA_BLOCK_THREADS)
{
if (thread_index < num_mod0_suffixes) { shared_storage.stage1.suffixes_h[thread_index] = __ldg(device_mod0h_suffixes + thread_index); }
shared_storage.stage1.suffixes_l[thread_index] = __ldg((uint2 *)(thread_index < num_mod0_suffixes ? device_mod0l_suffixes : device_mod12_suffixes) + thread_index);
}
__syncthreads();
}
{
uint32_t diagonal = threadIdx.x * 5;
uint32_t begin = (diagonal > num_mod12_suffixes) ? (diagonal - num_mod12_suffixes) : 0;
uint32_t end = (diagonal > num_mod0_suffixes ) ? (num_mod0_suffixes ) : diagonal;
while (begin < end)
{
uint32_t pivot = (begin + end) >> 1;
bool predicate = libcubwt_compare_suffixes_kernel(
shared_storage.stage1.suffixes_l[pivot],
shared_storage.stage1.suffixes_h[pivot],
shared_storage.stage1.suffixes_l[num_mod0_suffixes + diagonal - pivot - 1]);
begin = predicate ? (pivot + 1) : begin;
end = predicate ? (end ) : pivot;
}
__syncwarp();
uint32_t suffixes[5];
{
uint32_t mod0_index = begin;
uint32_t mod12_index = num_mod0_suffixes + diagonal - begin;
uint2 mod0l_suffix = shared_storage.stage1.suffixes_l[mod0_index];
uint32_t mod0h_suffix = shared_storage.stage1.suffixes_h[mod0_index];
uint2 mod12_suffix = shared_storage.stage1.suffixes_l[mod12_index];
#pragma unroll
for (uint32_t item = 0; item < 5; ++item)
{
bool predicate = libcubwt_compare_suffixes_kernel(mod0l_suffix, mod0h_suffix, mod12_suffix);
suffixes[item] = predicate ? mod0l_suffix.y : mod12_suffix.y;
if ( predicate) { mod0_index += 1; mod0l_suffix = shared_storage.stage1.suffixes_l[mod0_index]; mod0h_suffix = shared_storage.stage1.suffixes_h[mod0_index]; }
if (!predicate) { mod12_index += 1; mod12_suffix = shared_storage.stage1.suffixes_l[mod12_index]; }
}
__syncthreads();
}
{
#pragma unroll
for (uint32_t item = 0; item < 5; ++item)
{
if (suffixes[item] >= 0x01000000u)
{
device_auxiliary_indexes[process_auxiliary_indexes ? suffixes[item] >> 24 : 1] = blockIdx.x * CUDA_BLOCK_THREADS * 5 + diagonal + item;
}
shared_storage.stage2.bwt[diagonal + item] = (uint8_t)(suffixes[item] >> 16);
}
__syncthreads();
}
}
{
device_L += blockIdx.x * CUDA_BLOCK_THREADS * 5 + threadIdx.x * 16;
if (threadIdx.x < (CUDA_BLOCK_THREADS * 5 / 16)) { ((uint4 *)device_L)[0] = ((uint4 *)shared_storage.stage2.bwt)[threadIdx.x]; }
}
}
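/*
    Commentary: libcubwt_compute_burrows_wheeler_transform below drives the whole pipeline on a
    difference-cover style split of the suffixes. The mod-1/2 suffixes are first ordered by an
    8-byte prefix radix sort and then refined by iterative rank-and-segment passes (prefix
    doubling with depth 4, 8, 16, ...); the mod-0 suffixes are keyed off the mod-1/2 ranks and
    radix sorted separately; finally both classes are merged by the merge-path kernels above,
    which also emit the BWT symbols and the primary / auxiliary indexes (each incremented by one
    before being handed back to the caller).
*/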
static hipError_t libcubwt_compute_burrows_wheeler_transform(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, int64_t input_n, int64_t r, uint32_t * I)
{
hipError_t status = hipSuccess;
int64_t reduced_n = (input_n / 3) * 2 + 2;
int64_t expanded_n = (reduced_n / 2) * 3 + 0;
int64_t num_indexes = (input_n + r - 1) / r;
if ((status = libcubwt_initialize_device_arrays(storage, T, reduced_n, expanded_n, input_n)) == hipSuccess)
{
status = libcubwt_sort_suffixes_by_prefix(storage, reduced_n);
}
if (status == hipSuccess)
{
for (int64_t iteration = 0, depth = 4; true; iteration += 1, depth *= 2)
{
if ((status = libcubwt_rank_and_segment_suffixes(storage, reduced_n, iteration)) != hipSuccess)
{
break;
}
if (storage->num_unsorted_segments == 0)
{
break;
}
if ((status = libcubwt_update_suffix_sorting_keys(storage, reduced_n, iteration, depth)) != hipSuccess)
{
break;
}
if ((status = libcubwt_sort_segmented_suffixes_by_rank(storage, reduced_n)) != hipSuccess)
{
break;
}
}
}
if (status == hipSuccess)
{
int64_t num_mod0_suffixes = (input_n / 3) * 1 + ((input_n % 3) != 0);
int64_t num_mod12_suffixes = (input_n / 3) * 2 + ((input_n % 3) == 2);
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(storage->device_temp_ISA, storage->device_ISA, reduced_n * sizeof(uint32_t), hipMemcpyDeviceToDevice, storage->cuda_stream))) == hipSuccess)
{
cub::DoubleBuffer<uint64_t> db_mod12_suffixes(storage->device_keys_temp_keys, storage->device_SA_temp_SA);
if (status == hipSuccess)
{
{
int64_t n_preparing_blocks = (num_mod12_suffixes + storage->cuda_block_threads * 8 - 1) / (storage->cuda_block_threads * 8);
if (num_indexes > 1)
{
uint32_t rm = (uint32_t)(r - 1), rs = 0; while (rm >= ((uint32_t)1 << rs)) { rs += 1; }
hipLaunchKernelGGL(( libcubwt_prepare_mod12_suffixes_kernel<true>), dim3((uint32_t)n_preparing_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_T, storage->device_ISA,
db_mod12_suffixes.Current(),
rm, rs);
}
else
{
hipLaunchKernelGGL(( libcubwt_prepare_mod12_suffixes_kernel<false>), dim3((uint32_t)n_preparing_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_T, storage->device_ISA,
db_mod12_suffixes.Current(),
0, 0);
}
}
{
cub::DoubleBuffer<uint32_t> db_index(storage->device_ISA, storage->device_offsets);
status = libcubwt_scatter_values_uint64(storage, db_index, db_mod12_suffixes, num_mod12_suffixes, reduced_n, reduced_n - num_mod12_suffixes);
}
}
cub::DoubleBuffer<uint32_t> db_mod0h_suffixes(storage->device_ISA, storage->device_offsets);
cub::DoubleBuffer<uint64_t> db_mod0l_suffixes = db_mod12_suffixes.Current() == storage->device_keys_temp_keys
? cub::DoubleBuffer<uint64_t>((uint64_t *)storage->device_SA, (uint64_t *)storage->device_temp_SA)
: cub::DoubleBuffer<uint64_t>((uint64_t *)storage->device_keys, (uint64_t *)storage->device_temp_keys);
if (status == hipSuccess)
{
{
int64_t n_preparing_blocks = (num_mod0_suffixes + storage->cuda_block_threads * 2 - 1) / (storage->cuda_block_threads * 2);
if (num_indexes > 1)
{
uint32_t rm = (uint32_t)(r - 1), rs = 0; while (rm >= ((uint32_t)1 << rs)) { rs += 1; }
hipLaunchKernelGGL(( libcubwt_prepare_mod0_suffixes_kernel<true>), dim3((uint32_t)n_preparing_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_T, storage->device_temp_ISA,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(),
rm, rs);
}
else
{
hipLaunchKernelGGL(( libcubwt_prepare_mod0_suffixes_kernel<false>), dim3((uint32_t)n_preparing_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
storage->device_T, storage->device_temp_ISA,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(),
0, 0);
}
}
if (reduced_n <= (1 << 24))
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
0, 24,
storage->cuda_stream));
if (status == hipSuccess)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
32, 40,
storage->cuda_stream));
}
}
else
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
0, 40,
storage->cuda_stream));
}
}
if (status == hipSuccess)
{
int64_t n_merging_blocks = (input_n + storage->cuda_block_threads * 5 - 1) / (storage->cuda_block_threads * 5);
{
hipLaunchKernelGGL(( libcubwt_set_sentinel_suffixes_kernel), dim3(6), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
db_mod0l_suffixes.Current() + num_mod0_suffixes,
db_mod0h_suffixes.Current() + num_mod0_suffixes,
db_mod12_suffixes.Current() + num_mod12_suffixes);
}
{
int64_t n_merge_initialization_blocks = 1 + (n_merging_blocks / storage->cuda_block_threads);
hipLaunchKernelGGL(( libcubwt_suffixes_merge_initialization_kernel), dim3((uint32_t)n_merge_initialization_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), (uint32_t)(num_mod0_suffixes + 6 * storage->cuda_block_threads),
db_mod12_suffixes.Current(), (uint32_t)(num_mod12_suffixes + 6 * storage->cuda_block_threads),
(uint32_t *)storage->device_descriptors_large, (uint32_t)n_merging_blocks);
}
{
if (num_indexes > 1)
{
hipLaunchKernelGGL(( libcubwt_merge_suffixes_kernel<true>), dim3((uint32_t)n_merging_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), db_mod12_suffixes.Current(),
(uint32_t *)storage->device_descriptors_large,
(uint32_t *)storage->device_descriptors_small - 1,
storage->device_T);
}
else
{
hipLaunchKernelGGL(( libcubwt_merge_suffixes_kernel<false>), dim3((uint32_t)n_merging_blocks), dim3(storage->cuda_block_threads), 0, storage->cuda_stream,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), db_mod12_suffixes.Current(),
(uint32_t *)storage->device_descriptors_large,
(uint32_t *)storage->device_descriptors_small - 1,
storage->device_T);
}
}
}
if (status == hipSuccess)
{
uint32_t * buffer = ((sizeof(uint32_t) * num_indexes) <= storage->host_pinned_storage_size) ? (uint32_t *)storage->host_pinned_storage : I;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(buffer, storage->device_descriptors_small, sizeof(uint32_t) * num_indexes, hipMemcpyDeviceToHost, storage->cuda_stream), status);
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamSynchronize(storage->cuda_stream), status)) == hipSuccess)
{
if (I != buffer) { memcpy(I, buffer, sizeof(uint32_t) * num_indexes); }
for (int64_t index = 0; index < num_indexes; index += 1) { I[index] += 1; }
}
}
}
}
return status;
}
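/*
    Commentary: libcubwt_copy_burrows_wheeler_transform below assembles the final BWT on the
    host. L[0] is taken straight from T[input_n - 1], then index - 1 bytes and input_n - index
    bytes are copied back from the device buffer, i.e. the device byte at position index - 1 is
    deliberately skipped; reading the copy layout, that byte appears to belong to the row
    addressed by the primary index (this is an interpretation, not a statement from the original
    author).
*/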
static hipError_t libcubwt_copy_burrows_wheeler_transform(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, uint8_t * L, int64_t input_n, int64_t index)
{
hipError_t status = hipSuccess;
L[0] = T[input_n - 1];
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(L + 1, storage->device_T, (size_t)(index - 1), hipMemcpyDeviceToHost, storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMemcpyAsync(L + index, storage->device_T + index, (size_t)(input_n - index), hipMemcpyDeviceToHost, storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamSynchronize(storage->cuda_stream), status);
return status;
}
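/*
    Commentary: libcubwt_allocate_device_storage below sizes one large device allocation up
    front: scratch space for the cub radix / segmented sorts, the padded text buffer, three
    64-bit key/value arrays whose halves are later reused as 32-bit SA / ISA / keys / offsets
    views, and the descriptor arrays backing the look-back scans. storage->cuda_block_threads is
    also chosen here per architecture (768 on compute capability 8.6 / 8.7 / 8.9, 512 otherwise),
    matching the CUDA_BLOCK_THREADS value the kernels were compiled with.
*/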
int64_t libcubwt_allocate_device_storage(void ** device_storage, int64_t max_length)
{
int64_t max_reduced_length = ((max_length / 3) * 2 + 2 + 1023) & (-1024);
int64_t max_expanded_length = ((max_reduced_length / 2) * 3 + 0 + 1023) & (-1024);
if ((device_storage == NULL) || (max_expanded_length >= INT32_MAX))
{
return LIBCUBWT_BAD_PARAMETER;
}
*device_storage = NULL;
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)malloc(sizeof(LIBCUBWT_DEVICE_STORAGE));
if (storage != NULL)
{
memset(storage, 0, sizeof(LIBCUBWT_DEVICE_STORAGE));
hipError_t status = hipSuccess;
{
int32_t cuda_device_ordinal;
int32_t cuda_device_L2_cache_size;
int32_t cuda_device_capability;
libcubwt_cuda_safe_call(__FILE__, __LINE__, hipGetDevice(&cuda_device_ordinal), status);
libcubwt_cuda_safe_call(__FILE__, __LINE__, hipDeviceGetAttribute(&cuda_device_L2_cache_size, hipDeviceAttributeL2CacheSize, cuda_device_ordinal), status);
libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::PtxVersion(cuda_device_capability, cuda_device_ordinal), status);
if (status == hipSuccess)
{
storage->device_L2_cache_bits = 0; while (cuda_device_L2_cache_size >>= 1) { storage->device_L2_cache_bits += 1; };
storage->cuda_block_threads = (cuda_device_capability == 860 || cuda_device_capability == 870 || cuda_device_capability == 890) ? 768 : 512;
}
}
if (status == hipSuccess)
{
int64_t num_descriptors = ((max_reduced_length / (storage->cuda_block_threads * 4)) + 1024) & (-1024);
{
cub::DoubleBuffer<uint8_t> uint8_db;
cub::DoubleBuffer<uint32_t> uint32_db;
cub::DoubleBuffer<uint64_t> uint64_db;
size_t temp_radix_segmented_sort_k32v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceSegmentedSort::SortPairs(NULL, temp_radix_segmented_sort_k32v32, uint32_db, uint32_db, (int)max_reduced_length, (int)max_reduced_length / 2, uint32_db.Current(), uint32_db.Current()), status);
size_t temp_radix_sort_k32v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k32v32, uint32_db, uint32_db, (uint32_t)max_reduced_length), status);
size_t temp_radix_sort_k64v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k64v32, uint64_db, uint32_db, (uint32_t)max_reduced_length), status);
size_t temp_radix_sort_k32v64 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipcub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k32v64, uint32_db, uint64_db, (uint32_t)max_reduced_length), status);
storage->device_ssort_temp_storage_size = ::max(temp_radix_segmented_sort_k32v32, (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint32_t));
storage->device_rsort_temp_storage_size = ::max(::max(temp_radix_sort_k32v32, temp_radix_sort_k64v32), temp_radix_sort_k32v64);
storage->device_ssort_temp_storage_size = (storage->device_ssort_temp_storage_size + (size_t)1023) & (size_t)(-1024);
storage->device_rsort_temp_storage_size = (storage->device_rsort_temp_storage_size + (size_t)1023) & (size_t)(-1024);
}
if (status == hipSuccess)
{
size_t device_storage_size = 0;
device_storage_size += storage->device_ssort_temp_storage_size;
device_storage_size += storage->device_rsort_temp_storage_size;
device_storage_size += (max_expanded_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint2);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipMalloc((void **)&storage->device_storage, device_storage_size), status);
if (status == hipSuccess)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipHostMalloc((void **)&storage->host_pinned_storage, storage->host_pinned_storage_size = 256 * sizeof(uint32_t)), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamCreate(&storage->cuda_stream), status);
}
}
if (status == hipSuccess)
{
uint8_t * device_alloc = (uint8_t *)storage->device_storage;
storage->device_ssort_temp_storage = (void *)device_alloc; device_alloc += storage->device_ssort_temp_storage_size;
storage->device_rsort_temp_storage = (void *)device_alloc; device_alloc += storage->device_rsort_temp_storage_size;
storage->device_T = (uint8_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_expanded_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
storage->device_heads = (uint8_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
storage->device_SA_temp_SA = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_keys_temp_keys = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_offsets_ISA = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_descriptors_large = (uint4 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
storage->device_descriptors_copy = (uint4 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
storage->device_descriptors_small = (uint2 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint2);
storage->device_temp_ISA = (uint32_t *)(void *)storage->device_ssort_temp_storage + CUDA_DEVICE_PADDING;
storage->device_SA = (uint32_t *)(void *)(storage->device_SA_temp_SA - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_keys = (uint32_t *)(void *)(storage->device_keys_temp_keys - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_offsets = (uint32_t *)(void *)(storage->device_offsets_ISA - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_temp_SA = (uint32_t *)(void *)(storage->device_SA_temp_SA - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->device_temp_keys = (uint32_t *)(void *)(storage->device_keys_temp_keys - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->device_ISA = (uint32_t *)(void *)(storage->device_offsets_ISA - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->max_length = max_length;
*device_storage = storage;
return LIBCUBWT_NO_ERROR;
}
}
libcubwt_free_device_storage(storage);
return libcubwt_get_error_code(status);
}
return LIBCUBWT_NOT_ENOUGH_MEMORY;
}
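/*
    Commentary: libcubwt_free_device_storage below releases the stream, the pinned host buffer
    and the single device allocation; it is safe to call with a storage object whose device
    allocation failed part-way, because device_storage is checked before any runtime call is
    issued.
*/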
int64_t libcubwt_free_device_storage(void * device_storage)
{
hipError_t status = hipSuccess;
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if (storage != NULL)
{
if (storage->device_storage != NULL)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipStreamDestroy(storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipHostFree((void *)storage->host_pinned_storage), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, hipFree((void *)storage->device_storage), status);
}
free(storage);
}
return status != hipSuccess ? libcubwt_get_error_code(status) : LIBCUBWT_NO_ERROR;
}
int64_t libcubwt_bwt(void * device_storage, const uint8_t * T, uint8_t * L, int64_t n)
{
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if ((storage == NULL) || (T == NULL) || (L == NULL) || (n < 16) || (n > storage->max_length))
{
return LIBCUBWT_BAD_PARAMETER;
}
hipError_t status; uint32_t index;
if ((status = libcubwt_compute_burrows_wheeler_transform(storage, T, n, n, &index)) == hipSuccess &&
(status = libcubwt_copy_burrows_wheeler_transform(storage, T, L, n, index)) == hipSuccess)
{
return index;
}
return libcubwt_get_error_code(status);
}
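/*
    Usage sketch for libcubwt_bwt (commentary only; the buffer names are illustrative and error
    handling is abbreviated):

        void * storage = NULL;
        if (libcubwt_allocate_device_storage(&storage, n) == LIBCUBWT_NO_ERROR)
        {
            int64_t index = libcubwt_bwt(storage, T, L, n);   // primary index on success,
                                                              // a LIBCUBWT_* error code otherwise
            libcubwt_free_device_storage(storage);
        }

    Note that n must be at least 16 and no larger than the max_length the storage was allocated
    for, otherwise LIBCUBWT_BAD_PARAMETER is returned.
*/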
int64_t libcubwt_bwt_aux(void * device_storage, const uint8_t * T, uint8_t * L, int64_t n, int64_t r, uint32_t * I)
{
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if ((storage == NULL) || (T == NULL) || (L == NULL) || (n < 16) || (n > storage->max_length) || (r < 4) || ((r & (r - 1)) != 0) || ((n + r - 1) / r > 255) || (I == NULL))
{
return LIBCUBWT_BAD_PARAMETER;
}
hipError_t status;
if ((status = libcubwt_compute_burrows_wheeler_transform(storage, T, n, r, I)) == hipSuccess &&
(status = libcubwt_copy_burrows_wheeler_transform(storage, T, L, n, I[0])) == hipSuccess)
{
return LIBCUBWT_NO_ERROR;
}
return libcubwt_get_error_code(status);
}
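/*
    Commentary: libcubwt_bwt_aux above additionally fills I with auxiliary indexes taken at a
    stride of r positions. r must be a power of two, at least 4, and small enough that
    (n + r - 1) / r does not exceed 255, because the auxiliary slot number is packed into the
    upper 8 bits of the 32-bit suffix payload (see the `suffixes[item] >> 24` path in
    libcubwt_merge_suffixes_kernel).
*/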
| 541b9e4f3d283bf90eddf46cec9acc152f899618.cu | /*--
This file is a part of libcubwt, a library for CUDA accelerated
burrows wheeler transform construction.
Copyright (c) 2022-2023 Ilya Grebnov <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please see the file LICENSE for full copyright and license details.
--*/
#include "libcubwt.cuh"
#if defined(_MSC_VER) && defined(__INTELLISENSE__)
#define __launch_bounds__(block_size) /* */
#define __CUDACC__
#include <vector_functions.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#endif
#include <cub/cub.cuh>
#include <cuda.h>
#include <utility>
#if defined(__GNUC__) || defined(__clang__) || defined(__CUDACC__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define RESTRICT __restrict
#else
#define RESTRICT /* */
#endif
#ifndef __CUDA_ARCH__
#define CUDA_DEVICE_ARCH 0
#else
#define CUDA_DEVICE_ARCH __CUDA_ARCH__
#endif
#if CUDA_DEVICE_ARCH == 750
#define CUDA_SM_THREADS (1024)
#elif CUDA_DEVICE_ARCH == 860 || CUDA_DEVICE_ARCH == 870 || CUDA_DEVICE_ARCH == 890
#define CUDA_SM_THREADS (1536)
#else
#define CUDA_SM_THREADS (2048)
#endif
#if CUDA_DEVICE_ARCH == 860 || CUDA_DEVICE_ARCH == 870 || CUDA_DEVICE_ARCH == 890
#define CUDA_BLOCK_THREADS (768)
#else
#define CUDA_BLOCK_THREADS (512)
#endif
#define CUDA_WARP_THREADS (32)
#define CUDA_DEVICE_PADDING (12 * 768)
typedef struct LIBCUBWT_DEVICE_STORAGE
{
void * device_rsort_temp_storage;
size_t device_rsort_temp_storage_size;
void * device_ssort_temp_storage;
size_t device_ssort_temp_storage_size;
uint8_t * device_T;
uint8_t * device_heads;
uint32_t * device_SA;
uint32_t * device_ISA;
uint32_t * device_keys;
uint32_t * device_offsets;
uint32_t * device_temp_SA;
uint32_t * device_temp_ISA;
uint32_t * device_temp_keys;
uint64_t * device_SA_temp_SA;
uint64_t * device_keys_temp_keys;
uint64_t * device_offsets_ISA;
uint4 * device_descriptors_large;
uint4 * device_descriptors_copy;
uint2 * device_descriptors_small;
void * device_storage;
int32_t device_L2_cache_bits;
void * host_pinned_storage;
size_t host_pinned_storage_size;
int64_t max_length;
uint32_t num_unsorted_segments;
uint32_t num_unsorted_suffixes;
uint32_t cuda_block_threads;
cudaStream_t cuda_stream;
} LIBCUBWT_DEVICE_STORAGE;
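/*
    Commentary on LIBCUBWT_DEVICE_STORAGE above: the paired pointers (device_SA / device_temp_SA,
    device_keys / device_temp_keys, device_offsets / device_ISA) are 32-bit views into the shared
    64-bit buffers device_SA_temp_SA, device_keys_temp_keys and device_offsets_ISA, so the same
    memory doubles as ping-pong space for the radix sorts; the descriptor arrays back the
    single-pass look-back scans used while ranking and segmenting suffixes.
*/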
static int64_t libcubwt_get_error_code(cudaError_t status)
{
return
status == cudaErrorMemoryAllocation ? LIBCUBWT_GPU_NOT_ENOUGH_MEMORY :
status == cudaErrorDevicesUnavailable ? LIBCUBWT_GPU_NOT_SUPPORTED :
status == cudaErrorNoDevice ? LIBCUBWT_GPU_NOT_SUPPORTED :
LIBCUBWT_GPU_ERROR;
}
static cudaError_t libcubwt_cuda_safe_call(const char * filename, int32_t line, cudaError_t result, cudaError_t status = cudaSuccess)
{
#if !defined(NDEBUG)
if (result != cudaSuccess)
{
fprintf(stderr, "%s(%d): libcubwt_cuda_safe_call failed %d: '%s'.\n", filename, line, result, cudaGetErrorString(result));
fflush(stderr);
}
#else
(void)(filename); (void)(line);
#endif
return result != cudaSuccess ? result : status;
}
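/*
    Commentary: libcubwt_cuda_safe_call above threads an accumulated status through a chain of
    CUDA calls; it returns the new error if the wrapped call failed and the previously recorded
    status otherwise, so callers can write sequences such as

        status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(...), status);
        status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(...), status);

    and only inspect `status` once at the end, which is the pattern used throughout this file.
*/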
template <typename T>
__device__ __forceinline__ T libcubwt_warp_reduce_sum(T value)
{
#if CUDA_DEVICE_ARCH >= 800 && !defined(__CUDA__)
return __reduce_add_sync((uint32_t)-1, value);
#else
#pragma unroll
for (uint32_t mask = CUDA_WARP_THREADS / 2; mask > 0; mask >>= 1)
{
value = cub::Sum()(value, __shfl_xor_sync((uint32_t)-1, value, mask, CUDA_WARP_THREADS));
}
return value;
#endif
}
template <typename T>
__device__ __forceinline__ T libcubwt_warp_reduce_max(T value)
{
#if CUDA_DEVICE_ARCH >= 800 && !defined(__CUDA__)
return __reduce_max_sync((uint32_t)-1, value);
#else
#pragma unroll
for (uint32_t mask = CUDA_WARP_THREADS / 2; mask > 0; mask >>= 1)
{
value = cub::Max()(value, __shfl_xor_sync((uint32_t)-1, value, mask, CUDA_WARP_THREADS));
}
return value;
#endif
}
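/*
    Commentary: the two warp reductions above either use the SM80+ __reduce_add_sync /
    __reduce_max_sync intrinsics or fall back to a butterfly of __shfl_xor_sync exchanges; in
    both cases every lane of the full warp ends up holding the reduction over all 32 inputs
    (for example, libcubwt_warp_reduce_sum(1u) evaluates to 32 in every lane).
*/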
template <typename T>
__device__ __forceinline__ void libcubwt_delay_or_prevent_hoisting(T delay)
{
#if CUDA_DEVICE_ARCH >= 700
__nanosleep(delay);
#else
__threadfence_block(); (void)(delay);
#endif
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_gather_values_uint32_kernel(const uint32_t * device_idx, const uint32_t * RESTRICT device_src, uint32_t * device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
device_idx += block_index; device_dst += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 4)
{
const uint4 indexes = *(uint4 *)(device_idx + threadIdx.x * 4);
*(uint4 *)(device_dst + threadIdx.x * 4) = make_uint4(
__ldg(device_src + indexes.x),
__ldg(device_src + indexes.y),
__ldg(device_src + indexes.z),
__ldg(device_src + indexes.w));
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[thread_index] = __ldg(device_src + device_idx[thread_index]);
}
}
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_scatter_values_uint32_kernel(const uint32_t * RESTRICT device_idx, const uint32_t * RESTRICT device_src, uint32_t * RESTRICT device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
device_idx += block_index; device_src += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 4)
{
const uint4 indexes = __ldg((uint4 *)(device_idx + threadIdx.x * 4));
const uint4 values = __ldg((uint4 *)(device_src + threadIdx.x * 4));
device_dst[indexes.x] = values.x;
device_dst[indexes.y] = values.y;
device_dst[indexes.z] = values.z;
device_dst[indexes.w] = values.w;
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
}
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_permute_block_values_uint32_kernel(const uint32_t * RESTRICT device_idx, const uint32_t * RESTRICT device_src, uint32_t * RESTRICT device_dst, uint32_t n)
{
__shared__ __align__(32) uint32_t cache[16 * CUDA_BLOCK_THREADS];
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 16;
device_idx += block_index; device_src += block_index; device_dst += block_index; n -= block_index;
if (n >= CUDA_BLOCK_THREADS * 16)
{
{
const uint32_t * RESTRICT thread_idx = device_idx + threadIdx.x * 4;
const uint32_t * RESTRICT thread_src = device_src + threadIdx.x * 4;
uint32_t * RESTRICT thread_cache = cache - block_index;
#pragma unroll
for (uint32_t round = 0; round < 4; round += 1)
{
const uint4 indexes = __ldg((uint4 *)(thread_idx));
const uint4 values = __ldg((uint4 *)(thread_src));
thread_cache[indexes.x] = values.x;
thread_cache[indexes.y] = values.y;
thread_cache[indexes.z] = values.z;
thread_cache[indexes.w] = values.w;
thread_idx += 4 * CUDA_BLOCK_THREADS; thread_src += 4 * CUDA_BLOCK_THREADS;
}
}
__syncthreads();
{
const uint32_t * RESTRICT thread_cache = cache + threadIdx.x * 4;
uint32_t * RESTRICT thread_dst = device_dst + threadIdx.x * 4;
#pragma unroll
for (uint32_t round = 0; round < 4; round += 1)
{
*(uint4 *)(thread_dst) = *(uint4 *)(thread_cache);
thread_cache += 4 * CUDA_BLOCK_THREADS; thread_dst += 4 * CUDA_BLOCK_THREADS;
}
}
}
else
{
{
uint32_t * RESTRICT thread_cache = cache - block_index;
for (uint32_t thread_index = threadIdx.x; thread_index < n; thread_index += CUDA_BLOCK_THREADS)
{
thread_cache[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
__syncthreads();
{
for (uint32_t thread_index = threadIdx.x; thread_index < n; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[thread_index] = cache[thread_index];
}
}
}
}
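/*
    Commentary: libcubwt_permute_block_values_uint32_kernel above assumes every destination index
    of a 16 * CUDA_BLOCK_THREADS tile stays inside that tile, so it can permute through shared
    memory and write the result back with coalesced uint4 stores; the host-side dispatcher only
    takes this path when the tile size is a multiple of the unsorted low-bit range, and otherwise
    falls back to the plain scatter kernel.
*/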
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_scatter_values_uint64_kernel(const uint32_t * RESTRICT device_idx, const uint64_t * RESTRICT device_src, uint64_t * RESTRICT device_dst, uint32_t m)
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 2;
device_idx += block_index; device_src += block_index; m -= block_index;
if (m >= CUDA_BLOCK_THREADS * 2)
{
const uint2 indexes = __ldg((uint2 *)(device_idx + threadIdx.x * 2));
const ulonglong2 values = __ldg((ulonglong2 *)(device_src + threadIdx.x * 2));
device_dst[indexes.x] = values.x;
device_dst[indexes.y] = values.y;
}
else
{
for (uint32_t thread_index = threadIdx.x; thread_index < m; thread_index += CUDA_BLOCK_THREADS)
{
device_dst[__ldg(device_idx + thread_index)] = __ldg(device_src + thread_index);
}
}
}
static cudaError_t libcubwt_gather_scatter_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_src_idx, uint32_t * device_src, uint32_t * device_dst_idx, uint32_t * device_dst, int64_t m, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
cudaError_t status = cudaSuccess;
cub::DoubleBuffer<uint32_t> db_src_index_value(device_src_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_dst_index(device_dst_idx, device_temp2);
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = std::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_src_index_value, db_dst_index,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == cudaSuccess)
{
int64_t n_gather_scatter_blocks = (m + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
libcubwt_gather_values_uint32_kernel<<<(uint32_t)n_gather_scatter_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_src_index_value.Current(), device_src, db_src_index_value.Current(), (uint32_t)m);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_dst_index, db_src_index_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == cudaSuccess)
{
libcubwt_scatter_values_uint32_kernel<<<(uint32_t)n_gather_scatter_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_dst_index.Current(), db_src_index_value.Current(), device_dst, (uint32_t)m);
}
}
return status;
}
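/*
    Commentary: libcubwt_gather_scatter_values_uint32 above (and the scatter / permute helpers
    below) only radix-sort the index bits that exceed what the L2 cache can absorb, so the
    remaining random accesses land inside a roughly L2-sized window. Illustrative numbers (not
    from the original source): with n = 2^28 and a 4 MiB L2 cache, device_L2_cache_bits is 22,
    so sort_end_bit = 28, sort_aligned_bits = ((28 - 22 + 2) + 7) & -8 = 8 and sort_start_bit =
    20, i.e. only the top 8 index bits are sorted and each sorted bucket then scatters into at
    most 2^20 consecutive destinations (4 MiB of uint32 values).
*/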
static cudaError_t libcubwt_scatter_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_idx, uint32_t * device_src, uint32_t * device_dst, int64_t m, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
cudaError_t status = cudaSuccess;
cub::DoubleBuffer<uint32_t> db_index(device_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_value(device_src, device_temp2);
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = std::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == cudaSuccess)
{
int64_t n_scatter_blocks = (m + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
libcubwt_scatter_values_uint32_kernel<<<(uint32_t)n_scatter_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_index.Current(), db_value.Current(), device_dst, (uint32_t)m);
}
return status;
}
static cudaError_t libcubwt_permute_values_uint32(LIBCUBWT_DEVICE_STORAGE * storage, uint32_t * device_idx, uint32_t * device_src, uint32_t * device_dst, int64_t n, uint32_t * device_temp1, uint32_t * device_temp2)
{
cudaError_t status = cudaSuccess;
cub::DoubleBuffer<uint32_t> db_index(device_idx, device_temp1);
cub::DoubleBuffer<uint32_t> db_value(device_src, device_temp2);
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 2) ? (sort_end_bit - storage->device_L2_cache_bits + 2 + 7) & (-8) : 0;
int32_t sort_start_bit = std::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)n,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == cudaSuccess)
{
if (((storage->cuda_block_threads * 16) % ((int64_t)1 << sort_start_bit)) == 0)
{
int64_t n_permute_blocks = (n + storage->cuda_block_threads * 16 - 1) / (storage->cuda_block_threads * 16);
libcubwt_permute_block_values_uint32_kernel<<<(uint32_t)n_permute_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_index.Current(), db_value.Current(), device_dst, (uint32_t)n);
}
else
{
int64_t n_scatter_blocks = (n + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
libcubwt_scatter_values_uint32_kernel<<<(uint32_t)n_scatter_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_index.Current(), db_value.Current(), device_dst, (uint32_t)n);
}
}
return status;
}
static cudaError_t libcubwt_scatter_values_uint64(LIBCUBWT_DEVICE_STORAGE * storage, cub::DoubleBuffer<uint32_t> & db_index, cub::DoubleBuffer<uint64_t> & db_value, int64_t m, int64_t n, int64_t k = 0)
{
cudaError_t status = cudaSuccess;
int32_t sort_end_bit = 0; while ((n - 1) >= ((int64_t)1 << sort_end_bit)) { sort_end_bit += 1; }
int32_t sort_aligned_bits = (sort_end_bit > storage->device_L2_cache_bits - 3) ? (sort_end_bit - storage->device_L2_cache_bits + 3 + 7) & (-8) : 0;
int32_t sort_start_bit = std::max(0, sort_end_bit - sort_aligned_bits);
if (sort_start_bit < sort_end_bit)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_index, db_value,
(uint32_t)m,
sort_start_bit, sort_end_bit,
storage->cuda_stream));
}
if (status == cudaSuccess)
{
int64_t n_scatter_blocks = (m + storage->cuda_block_threads * 2 - 1) / (storage->cuda_block_threads * 2);
libcubwt_scatter_values_uint64_kernel<<<(uint32_t)n_scatter_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(db_index.Current(), db_value.Current(), db_value.Alternate() - k, (uint32_t)m);
db_index.selector ^= 1;
db_value.selector ^= 1;
}
return status;
}
template <bool extra_sentinel_bits>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_initialize_device_arrays_kernel(const uint8_t * RESTRICT device_T, uint32_t * RESTRICT device_SA, uint64_t * RESTRICT device_keys)
{
__shared__ __align__(32) uint4 prefixes[4 * CUDA_BLOCK_THREADS];
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 16;
if (threadIdx.x < (12 * CUDA_BLOCK_THREADS + 8 + 15) / 16) { prefixes[threadIdx.x] = __ldg((uint4 *)device_T); }
__syncthreads();
}
{
uint32_t * RESTRICT thread_cache = ((uint32_t *)prefixes) + threadIdx.x * 3;
uint4 * RESTRICT thread_prefixes = ((uint4 * )prefixes) + threadIdx.x * 4;
const uint32_t b0 = thread_cache[0];
const uint32_t b1 = thread_cache[1];
const uint32_t b2 = thread_cache[2];
const uint32_t b3 = thread_cache[3];
const uint32_t b4 = thread_cache[4];
__syncthreads();
thread_prefixes[0] = make_uint4
(
__byte_perm(b1, b2, 0x1234) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b0, b1, 0x1234),
__byte_perm(b1, b2, 0x2345) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b0, b1, 0x2345)
);
thread_prefixes[1] = make_uint4
(
__byte_perm(b2, b3, 0x0123) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x0123),
__byte_perm(b2, b3, 0x1234) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x1234)
);
thread_prefixes[2] = make_uint4
(
__byte_perm(b2, b3, 0x3456) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b1, b2, 0x3456),
__byte_perm(b3, b4, 0x0123) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x0123)
);
thread_prefixes[3] = make_uint4
(
__byte_perm(b3, b4, 0x2345) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x2345),
__byte_perm(b3, b4, 0x3456) | (extra_sentinel_bits ? (uint32_t)7 : (uint32_t)1), __byte_perm(b2, b3, 0x3456)
);
__syncwarp();
}
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 8;
{
uint32_t thread_index = block_index + threadIdx.x * 4; device_SA += thread_index;
((uint4 *)device_SA)[0] = make_uint4(thread_index + 0, thread_index + 1, thread_index + 2, thread_index + 3);
thread_index += CUDA_BLOCK_THREADS * 4; device_SA += CUDA_BLOCK_THREADS * 4;
((uint4 *)device_SA)[0] = make_uint4(thread_index + 0, thread_index + 1, thread_index + 2, thread_index + 3);
}
{
device_keys += block_index;
uint4 * RESTRICT thread_prefixes = (uint4 *)prefixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
uint4 * RESTRICT thread_keys = (uint4 *)device_keys + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0]; thread_keys += CUDA_WARP_THREADS; thread_prefixes += CUDA_WARP_THREADS;
thread_keys[0] = thread_prefixes[0];
}
}
}
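/*
    Commentary: libcubwt_initialize_device_arrays_kernel above writes, for every reduced suffix,
    its index into device_SA and a 64-bit key packing the leading text bytes via __byte_perm; the
    low bits of each key carry a sentinel flag (7 when at least two extra sentinel positions are
    available or the text already ends in a zero byte, 1 otherwise), presumably so that suffixes
    near the end of the text compare correctly during the initial prefix sort.
*/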
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, 1)
static void libcubwt_set_sentinel_values_kernel(uint8_t * RESTRICT device_T_end, uint64_t * RESTRICT device_keys_end, uint64_t k0, uint64_t k1, uint64_t k2, uint64_t k3, uint64_t k4, uint64_t k5, uint64_t k6, uint64_t k7)
{
device_T_end[0] = 0;
device_T_end[1] = 0;
device_T_end[2] = 0;
device_keys_end[-8] = k0;
device_keys_end[-7] = k1;
device_keys_end[-6] = k2;
device_keys_end[-5] = k3;
device_keys_end[-4] = k4;
device_keys_end[-3] = k5;
device_keys_end[-2] = k6;
device_keys_end[-1] = k7;
}
static cudaError_t libcubwt_initialize_device_arrays(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, int64_t reduced_n, int64_t expanded_n, int64_t input_n)
{
cudaError_t status = cudaSuccess;
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->device_T, T, (size_t)input_n, cudaMemcpyHostToDevice, storage->cuda_stream))) == cudaSuccess)
{
int64_t n_initialize_blocks = 1 + (expanded_n / (storage->cuda_block_threads * 12));
bool extra_sentinel_bits = (expanded_n - input_n >= 2) || (T[input_n - 1] == 0);
if (extra_sentinel_bits)
{
libcubwt_initialize_device_arrays_kernel<true><<<(uint32_t)n_initialize_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(storage->device_T, storage->device_SA, storage->device_keys_temp_keys);
}
else
{
libcubwt_initialize_device_arrays_kernel<false><<<(uint32_t)n_initialize_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(storage->device_T, storage->device_SA, storage->device_keys_temp_keys);
}
{
uint64_t c0 = (expanded_n - 11 < input_n) ? T[expanded_n - 11] : (uint64_t)0;
uint64_t c1 = (expanded_n - 10 < input_n) ? T[expanded_n - 10] : (uint64_t)0;
uint64_t c2 = (expanded_n - 9 < input_n) ? T[expanded_n - 9] : (uint64_t)0;
uint64_t c3 = (expanded_n - 8 < input_n) ? T[expanded_n - 8] : (uint64_t)0;
uint64_t c4 = (expanded_n - 7 < input_n) ? T[expanded_n - 7] : (uint64_t)0;
uint64_t c5 = (expanded_n - 6 < input_n) ? T[expanded_n - 6] : (uint64_t)0;
uint64_t c6 = (expanded_n - 5 < input_n) ? T[expanded_n - 5] : (uint64_t)0;
uint64_t c7 = (expanded_n - 4 < input_n) ? T[expanded_n - 4] : (uint64_t)0;
uint64_t c8 = (expanded_n - 3 < input_n) ? T[expanded_n - 3] : (uint64_t)0;
uint64_t c9 = (expanded_n - 2 < input_n) ? T[expanded_n - 2] : (uint64_t)0;
uint64_t ca = (expanded_n - 1 < input_n) ? T[expanded_n - 1] : (uint64_t)0;
uint64_t k0 = (c0 << 56) | (c1 << 48) | (c2 << 40) | (c3 << 32) | (c4 << 24) | (c5 << 16) | (c6 << 8) | (c7 << 0) | (extra_sentinel_bits ? 7 : 1);
uint64_t k1 = (c1 << 56) | (c2 << 48) | (c3 << 40) | (c4 << 32) | (c5 << 24) | (c6 << 16) | (c7 << 8) | (c8 << 0) | (extra_sentinel_bits ? 7 : 1);
uint64_t k2 = (c3 << 56) | (c4 << 48) | (c5 << 40) | (c6 << 32) | (c7 << 24) | (c8 << 16) | (c9 << 8) | (ca << 0) | (extra_sentinel_bits ? 7 : 0);
uint64_t k3 = (c4 << 56) | (c5 << 48) | (c6 << 40) | (c7 << 32) | (c8 << 24) | (c9 << 16) | (ca << 8) | (extra_sentinel_bits ? 6 : 0);
uint64_t k4 = (c6 << 56) | (c7 << 48) | (c8 << 40) | (c9 << 32) | (ca << 24) | (extra_sentinel_bits ? 4 : 0);
uint64_t k5 = (c7 << 56) | (c8 << 48) | (c9 << 40) | (ca << 32) | (extra_sentinel_bits ? 3 : 0);
uint64_t k6 = (c9 << 56) | (ca << 48) | (extra_sentinel_bits ? 1 : 0);
uint64_t k7 = (ca << 56);
libcubwt_set_sentinel_values_kernel<<<1, 1, 0, storage->cuda_stream>>>(storage->device_T + input_n, storage->device_keys_temp_keys + reduced_n, k0, k1, k2, k3, k4, k5, k6, k7);
}
storage->num_unsorted_segments = (uint32_t)1;
storage->num_unsorted_suffixes = (uint32_t)reduced_n;
}
return status;
}
static cudaError_t libcubwt_sort_suffixes_by_prefix(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n)
{
cub::DoubleBuffer<uint64_t> db_keys(storage->device_keys_temp_keys, storage->device_offsets_ISA);
cub::DoubleBuffer<uint32_t> db_SA(storage->device_SA, storage->device_temp_SA);
cudaError_t status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_keys, db_SA,
(uint32_t)n,
0, 64,
storage->cuda_stream));
if (db_keys.selector)
{
std::swap(storage->device_keys_temp_keys, storage->device_offsets_ISA);
std::swap(storage->device_keys, storage->device_offsets);
std::swap(storage->device_temp_keys, storage->device_ISA);
}
if (db_SA.selector)
{
std::swap(storage->device_SA, storage->device_temp_SA);
}
return status;
}
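/*
    Commentary: libcubwt_sort_suffixes_by_prefix above performs the initial 64-bit prefix radix
    sort and then, instead of copying data back, swaps the struct's pointer aliases whenever the
    cub::DoubleBuffer ends up selecting the alternate buffer; the overlapping 32-bit views
    (device_keys / device_offsets, device_temp_keys / device_ISA, device_SA / device_temp_SA) are
    swapped together so every later stage keeps seeing consistent names.
*/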
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_initialization_kernel(uint32_t * RESTRICT device_SA, uint64_t * RESTRICT device_keys, uint8_t * RESTRICT device_heads, uint4 * RESTRICT device_descriptors_large, uint2 * RESTRICT device_descriptors_small, uint32_t n)
{
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
device_descriptors_large += thread_index;
device_descriptors_small += thread_index;
device_descriptors_large[0] = make_uint4(0, 0, 0, 0);
device_descriptors_small[0] = make_uint2(0, 0);
if (blockIdx.x == 0)
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
device_descriptors_large[-CUDA_WARP_THREADS] = make_uint4((uint32_t)-1, 0, 0, 0);
device_descriptors_small[-CUDA_WARP_THREADS] = make_uint2((uint32_t)-1, 0);
}
{
uint64_t key = (threadIdx.x % 2 == 0) ? 0 : (uint64_t)-1;
device_SA += threadIdx.x; device_keys += threadIdx.x; device_heads += threadIdx.x;
if (threadIdx.x < 2)
{
device_keys [-2] = key;
device_heads[-2] = 1;
}
device_SA += n; device_keys += n; device_heads += n;
device_SA [0 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 0 * CUDA_BLOCK_THREADS;
device_SA [1 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 1 * CUDA_BLOCK_THREADS;
device_SA [2 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 2 * CUDA_BLOCK_THREADS;
device_SA [3 * CUDA_BLOCK_THREADS] = n + threadIdx.x + 3 * CUDA_BLOCK_THREADS;
device_keys [0 * CUDA_BLOCK_THREADS] = key;
device_keys [1 * CUDA_BLOCK_THREADS] = key;
device_keys [2 * CUDA_BLOCK_THREADS] = key;
device_keys [3 * CUDA_BLOCK_THREADS] = key;
device_heads[0 * CUDA_BLOCK_THREADS] = 1;
device_heads[1 * CUDA_BLOCK_THREADS] = 1;
device_heads[2 * CUDA_BLOCK_THREADS] = 1;
device_heads[3 * CUDA_BLOCK_THREADS] = 1;
}
}
}
template <bool scatter_ranks_directly>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_initiatory_kernel(
const uint32_t * RESTRICT device_SA,
const uint64_t * RESTRICT device_keys,
uint8_t * RESTRICT device_heads,
uint32_t * RESTRICT device_ISA,
uint32_t * RESTRICT device_offsets_begin,
uint32_t * RESTRICT device_offsets_end,
uint4 * RESTRICT device_descriptors
)
{
__shared__ __align__(32) uint2 warp_state[1 + CUDA_WARP_THREADS];
uint32_t thread_exclusive_suffix_rank;
uint32_t thread_suffix_rank[4];
uint32_t thread_exclusive_segment_index;
uint32_t thread_segment_index[4];
{
__shared__ __align__(32) ulonglong2 cache[1 + 2 * CUDA_BLOCK_THREADS];
{
device_keys += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 2;
if (threadIdx.x == 0) { cache[0] = __ldg((ulonglong2 *)(device_keys - 2)); }
cache[1 + threadIdx.x + 0 * CUDA_BLOCK_THREADS] = __ldg((ulonglong2 *)(device_keys + 0 * CUDA_BLOCK_THREADS));
cache[1 + threadIdx.x + 1 * CUDA_BLOCK_THREADS] = __ldg((ulonglong2 *)(device_keys + 2 * CUDA_BLOCK_THREADS));
}
__syncthreads();
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
const uint32_t thread_index = block_index + threadIdx.x * 4;
ulonglong2 key_a = cache[2 * threadIdx.x + 0];
ulonglong2 key_b = cache[2 * threadIdx.x + 1];
ulonglong2 key_c = cache[2 * threadIdx.x + 2];
uchar4 thread_new_heads = make_uchar4(
(key_a.y != key_b.x) ? (uint8_t)1 : (uint8_t)0,
(key_b.x != key_b.y) ? (uint8_t)1 : (uint8_t)0,
(key_b.y != key_c.x) ? (uint8_t)1 : (uint8_t)0,
(key_c.x != key_c.y) ? (uint8_t)1 : (uint8_t)0);
*(uchar4 *)(device_heads + thread_index) = thread_new_heads;
thread_suffix_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : 0;
thread_suffix_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_rank[0];
thread_suffix_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_rank[1];
thread_suffix_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_rank[2];
thread_segment_index[0] = ((thread_new_heads.x != 0) && (key_a.x == key_a.y));
thread_segment_index[1] = thread_segment_index[0] + ((thread_new_heads.y != 0) && (thread_new_heads.x == 0));
thread_segment_index[2] = thread_segment_index[1] + ((thread_new_heads.z != 0) && (thread_new_heads.y == 0));
thread_segment_index[3] = thread_segment_index[2] + ((thread_new_heads.w != 0) && (thread_new_heads.z == 0));
}
}
{
uint32_t thread_inclusive_suffix_rank;
uint32_t thread_inclusive_segment_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_rank[3] , thread_inclusive_suffix_rank , thread_exclusive_suffix_rank , (uint32_t)0, cub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_segment_index[3], thread_inclusive_segment_index, thread_exclusive_segment_index, (uint32_t)0, cub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state[threadIdx.x / CUDA_WARP_THREADS] = make_uint2(thread_inclusive_suffix_rank, thread_inclusive_segment_index);
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_rank = 0;
uint32_t block_exclusive_segment_index = 0;
uint32_t warp_inclusive_suffix_rank;
uint32_t warp_inclusive_segment_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint2 warp_inclusive_state = warp_state[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.x, warp_inclusive_suffix_rank , cub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.y, warp_inclusive_segment_index, cub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = 0;
const uint32_t descriptor_status_partial_aggregate_ready = 1;
const uint32_t descriptor_status_full_aggregate_ready = 4;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_partial_aggregate_ready, 0, warp_inclusive_suffix_rank, warp_inclusive_segment_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint4 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.z = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.z : 0;
block_descriptor.w = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.w : 0;
}
{
block_exclusive_suffix_rank = cub::Max()(block_exclusive_suffix_rank , libcubwt_warp_reduce_max(block_descriptor.z));
block_exclusive_segment_index = cub::Sum()(block_exclusive_segment_index, libcubwt_warp_reduce_sum(block_descriptor.w));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_rank = cub::Max()(warp_inclusive_suffix_rank , block_exclusive_suffix_rank );
warp_inclusive_segment_index = cub::Sum()(warp_inclusive_segment_index, block_exclusive_segment_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_full_aggregate_ready, 0, warp_inclusive_suffix_rank, warp_inclusive_segment_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state[0] = make_uint2(block_exclusive_suffix_rank, block_exclusive_segment_index);
}
warp_state[1 + threadIdx.x] = make_uint2(warp_inclusive_suffix_rank, warp_inclusive_segment_index);
}
}
__syncthreads();
}
{
uint2 warp_exclusive_state = warp_state[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_rank = cub::Max()(thread_exclusive_suffix_rank , warp_exclusive_state.x);
thread_exclusive_segment_index = cub::Sum()(thread_exclusive_segment_index, warp_exclusive_state.y);
thread_suffix_rank[0] = cub::Max()(thread_suffix_rank[0], thread_exclusive_suffix_rank);
thread_suffix_rank[1] = cub::Max()(thread_suffix_rank[1], thread_exclusive_suffix_rank);
thread_suffix_rank[2] = cub::Max()(thread_suffix_rank[2], thread_exclusive_suffix_rank);
thread_suffix_rank[3] = cub::Max()(thread_suffix_rank[3], thread_exclusive_suffix_rank);
thread_segment_index[0] = cub::Sum()(thread_segment_index[0], thread_exclusive_segment_index);
thread_segment_index[1] = cub::Sum()(thread_segment_index[1], thread_exclusive_segment_index);
thread_segment_index[2] = cub::Sum()(thread_segment_index[2], thread_exclusive_segment_index);
thread_segment_index[3] = cub::Sum()(thread_segment_index[3], thread_exclusive_segment_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
if (thread_exclusive_segment_index != thread_segment_index[0]) { device_offsets_begin[thread_segment_index[0]] = thread_exclusive_suffix_rank; device_offsets_end[thread_segment_index[0]] = thread_index + 0; }
if (thread_segment_index[0] != thread_segment_index[1]) { device_offsets_begin[thread_segment_index[1]] = thread_suffix_rank[0]; device_offsets_end[thread_segment_index[1]] = thread_index + 1; }
if (thread_segment_index[1] != thread_segment_index[2]) { device_offsets_begin[thread_segment_index[2]] = thread_suffix_rank[1]; device_offsets_end[thread_segment_index[2]] = thread_index + 2; }
if (thread_segment_index[2] != thread_segment_index[3]) { device_offsets_begin[thread_segment_index[3]] = thread_suffix_rank[2]; device_offsets_end[thread_segment_index[3]] = thread_index + 3; }
if (scatter_ranks_directly)
{
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
device_ISA[indexes.x] = thread_suffix_rank[0];
device_ISA[indexes.y] = thread_suffix_rank[1];
device_ISA[indexes.z] = thread_suffix_rank[2];
device_ISA[indexes.w] = thread_suffix_rank[3];
}
else
{
*(uint4 *)(device_ISA + thread_index) = make_uint4(thread_suffix_rank[0], thread_suffix_rank[1], thread_suffix_rank[2], thread_suffix_rank[3]);
}
}
}
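/*
    Commentary: the rank-and-segment kernels above and below are single-pass chained scans in the
    style of decoupled look-back: each block first publishes a partial aggregate descriptor
    (status 1), then one warp walks backwards over earlier blocks' descriptors, spinning via
    libcubwt_delay_or_prevent_hoisting until they become ready, combines their rank / segment
    counts, and finally publishes its own full aggregate (status 4) so that later blocks can cut
    their look-back short.
*/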
template <bool alternate_block_descriptor_statuses, bool scatter_ranks_directly>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_rank_and_segment_suffixes_incremental_kernel(
const uint32_t * RESTRICT device_SA,
const uint32_t * RESTRICT device_keys,
uint8_t * RESTRICT device_heads,
uint32_t * RESTRICT device_out_SA,
uint32_t * RESTRICT device_out_ISA,
uint32_t * RESTRICT device_offsets_begin,
uint32_t * RESTRICT device_offsets_end,
uint4 * RESTRICT device_descriptors,
const uint4 * RESTRICT device_descriptors_copy
)
{
__shared__ __align__(32) uint4 warp_state1[1 + CUDA_WARP_THREADS];
__shared__ __align__(32) uint32_t warp_state2[1 + CUDA_WARP_THREADS];
uchar4 thread_old_heads;
uint32_t thread_exclusive_suffix_old_rank;
uchar4 thread_new_heads;
uint32_t thread_exclusive_suffix_new_rank;
uint32_t thread_exclusive_segment_index;
uint32_t thread_segment_index[4];
uint32_t thread_exclusive_suffix_index;
uint32_t thread_suffix_index[4];
{
const uint32_t block_index = blockIdx.x * CUDA_BLOCK_THREADS * 4;
const uint32_t thread_index = block_index + threadIdx.x * 4;
device_keys += thread_index; device_heads += thread_index;
uint2 key_a = __ldg((uint2 *)(device_keys - 2));
uint4 key_b = __ldg((uint4 *)(device_keys));
thread_old_heads = *(uchar4 *)(device_heads);
thread_new_heads = make_uchar4(
(key_a.y != key_b.x) ? (uint8_t)1 : (uint8_t)thread_old_heads.x,
(key_b.x != key_b.y) ? (uint8_t)1 : (uint8_t)thread_old_heads.y,
(key_b.y != key_b.z) ? (uint8_t)1 : (uint8_t)thread_old_heads.z,
(key_b.z != key_b.w) ? (uint8_t)1 : (uint8_t)thread_old_heads.w);
*(uchar4 *)(device_heads) = thread_new_heads;
thread_exclusive_suffix_old_rank = (thread_old_heads.x != 0) ? (thread_index + 0) : 0;
thread_exclusive_suffix_old_rank = (thread_old_heads.y != 0) ? (thread_index + 1) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_old_rank = (thread_old_heads.z != 0) ? (thread_index + 2) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_old_rank = (thread_old_heads.w != 0) ? (thread_index + 3) : thread_exclusive_suffix_old_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.x != 0) ? (thread_index + 0) : 0;
thread_exclusive_suffix_new_rank = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_exclusive_suffix_new_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_exclusive_suffix_new_rank;
thread_exclusive_suffix_new_rank = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_exclusive_suffix_new_rank;
thread_segment_index[0] = ((thread_new_heads.x != 0) && (key_a.x == key_a.y) && (device_heads[-1] == 0));
thread_segment_index[1] = thread_segment_index[0] + ((thread_new_heads.y != 0) && (thread_new_heads.x == 0));
thread_segment_index[2] = thread_segment_index[1] + ((thread_new_heads.z != 0) && (thread_new_heads.y == 0));
thread_segment_index[3] = thread_segment_index[2] + ((thread_new_heads.w != 0) && (thread_new_heads.z == 0));
}
{
uint32_t thread_inclusive_suffix_old_rank;
uint32_t thread_inclusive_suffix_new_rank;
uint32_t thread_inclusive_segment_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_BLOCK_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_exclusive_suffix_old_rank, thread_inclusive_suffix_old_rank, thread_exclusive_suffix_old_rank, (uint32_t)0, cub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_exclusive_suffix_new_rank, thread_inclusive_suffix_new_rank, thread_exclusive_suffix_new_rank, (uint32_t)0, cub::Max());
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_segment_index[3] , thread_inclusive_segment_index , thread_exclusive_segment_index , (uint32_t)0, cub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state1[threadIdx.x / CUDA_WARP_THREADS] = make_uint4(0, thread_inclusive_suffix_old_rank, thread_inclusive_suffix_new_rank, thread_inclusive_segment_index);
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_new_rank = 0;
uint32_t block_exclusive_segment_index = 0;
uint32_t warp_inclusive_suffix_old_rank;
uint32_t warp_inclusive_suffix_new_rank;
uint32_t warp_inclusive_segment_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint4 warp_inclusive_state = warp_state1[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.y, warp_inclusive_suffix_old_rank, cub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.z, warp_inclusive_suffix_new_rank, cub::Max());
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state.w, warp_inclusive_segment_index , cub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 4 : 0;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 3 : 1;
const uint32_t descriptor_status_full_aggregate_ready = scatter_ranks_directly ? (alternate_block_descriptor_statuses ? 0 : 4) : 2;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_partial_aggregate_ready, 0, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint4 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.z = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.z : 0;
block_descriptor.w = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.w : 0;
}
{
block_exclusive_suffix_new_rank = cub::Max()(block_exclusive_suffix_new_rank , libcubwt_warp_reduce_max(block_descriptor.z));
block_exclusive_segment_index = cub::Sum()(block_exclusive_segment_index , libcubwt_warp_reduce_sum(block_descriptor.w));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_new_rank = cub::Max()(warp_inclusive_suffix_new_rank, block_exclusive_suffix_new_rank);
warp_inclusive_segment_index = cub::Sum()(warp_inclusive_segment_index , block_exclusive_segment_index );
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint4(descriptor_status_full_aggregate_ready, 0, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index));
}
}
{
uint32_t block_exclusive_suffix_old_rank = __ldg((uint32_t *)(device_descriptors_copy + blockIdx.x - 1) + 2);
warp_inclusive_suffix_old_rank = cub::Max()(warp_inclusive_suffix_old_rank, block_exclusive_suffix_old_rank);
if (threadIdx.x == 0)
{
warp_state1[0] = make_uint4(0, block_exclusive_suffix_old_rank, block_exclusive_suffix_new_rank, block_exclusive_segment_index);
}
warp_state1[1 + threadIdx.x] = make_uint4(0, warp_inclusive_suffix_old_rank, warp_inclusive_suffix_new_rank, warp_inclusive_segment_index);
}
}
__syncthreads();
}
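// Each thread folds the block/warp exclusive prefixes into its own values, derives the final
// old/new rank of every suffix it owns, and records begin/end offsets for newly created segments.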
{
uint32_t thread_suffix_old_rank[4];
uint32_t thread_suffix_new_rank[4];
uint4 warp_exclusive_state = warp_state1[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_old_rank = cub::Max()(thread_exclusive_suffix_old_rank, warp_exclusive_state.y);
thread_exclusive_suffix_new_rank = cub::Max()(thread_exclusive_suffix_new_rank, warp_exclusive_state.z);
thread_exclusive_segment_index = cub::Sum()(thread_exclusive_segment_index , warp_exclusive_state.w);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
thread_suffix_old_rank[0] = (thread_old_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_old_rank;
thread_suffix_old_rank[1] = (thread_old_heads.y != 0) ? (thread_index + 1) : thread_suffix_old_rank[0];
thread_suffix_old_rank[2] = (thread_old_heads.z != 0) ? (thread_index + 2) : thread_suffix_old_rank[1];
thread_suffix_old_rank[3] = (thread_old_heads.w != 0) ? (thread_index + 3) : thread_suffix_old_rank[2];
thread_suffix_new_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_new_rank;
thread_suffix_new_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_new_rank[0];
thread_suffix_new_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_new_rank[1];
thread_suffix_new_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_new_rank[2];
thread_segment_index[0] = cub::Sum()(thread_segment_index[0], thread_exclusive_segment_index);
thread_segment_index[1] = cub::Sum()(thread_segment_index[1], thread_exclusive_segment_index);
thread_segment_index[2] = cub::Sum()(thread_segment_index[2], thread_exclusive_segment_index);
thread_segment_index[3] = cub::Sum()(thread_segment_index[3], thread_exclusive_segment_index);
if (thread_exclusive_segment_index != thread_segment_index[0]) { device_offsets_begin[thread_segment_index[0]] = thread_exclusive_suffix_new_rank; device_offsets_end[thread_segment_index[0]] = thread_index + 0; }
if (thread_segment_index[0] != thread_segment_index[1]) { device_offsets_begin[thread_segment_index[1]] = thread_suffix_new_rank[0]; device_offsets_end[thread_segment_index[1]] = thread_index + 1; }
if (thread_segment_index[1] != thread_segment_index[2]) { device_offsets_begin[thread_segment_index[2]] = thread_suffix_new_rank[1]; device_offsets_end[thread_segment_index[2]] = thread_index + 2; }
if (thread_segment_index[2] != thread_segment_index[3]) { device_offsets_begin[thread_segment_index[3]] = thread_suffix_new_rank[2]; device_offsets_end[thread_segment_index[3]] = thread_index + 3; }
if (scatter_ranks_directly)
{
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
if (thread_suffix_old_rank[0] != thread_suffix_new_rank[0]) { device_out_ISA[indexes.x] = thread_suffix_new_rank[0]; }
if (thread_suffix_old_rank[1] != thread_suffix_new_rank[1]) { device_out_ISA[indexes.y] = thread_suffix_new_rank[1]; }
if (thread_suffix_old_rank[2] != thread_suffix_new_rank[2]) { device_out_ISA[indexes.z] = thread_suffix_new_rank[2]; }
if (thread_suffix_old_rank[3] != thread_suffix_new_rank[3]) { device_out_ISA[indexes.w] = thread_suffix_new_rank[3]; }
}
else
{
thread_suffix_index[0] = (thread_suffix_old_rank[0] != thread_suffix_new_rank[0]);
thread_suffix_index[1] = thread_suffix_index[0] + (thread_suffix_old_rank[1] != thread_suffix_new_rank[1]);
thread_suffix_index[2] = thread_suffix_index[1] + (thread_suffix_old_rank[2] != thread_suffix_new_rank[2]);
thread_suffix_index[3] = thread_suffix_index[2] + (thread_suffix_old_rank[3] != thread_suffix_new_rank[3]);
}
}
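// Indirect path: suffixes whose rank changed are counted and compacted with a second scan and
// look-back pass (the descriptor status codes are offset so the two passes do not clash), then
// written out as dense (SA index, new rank) pairs for a later scatter.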
if (!scatter_ranks_directly)
{
{
uint32_t thread_inclusive_suffix_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_index[3], thread_inclusive_suffix_index, thread_exclusive_suffix_index, (uint32_t)0, cub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state2[threadIdx.x / CUDA_WARP_THREADS] = thread_inclusive_suffix_index;
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_index = 0;
uint32_t warp_inclusive_suffix_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint32_t warp_inclusive_state = warp_state2[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state, warp_inclusive_suffix_index, cub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 2 : 2;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 1 : 3;
const uint32_t descriptor_status_full_aggregate_ready = alternate_block_descriptor_statuses ? 0 : 4;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>((uint2 *)(device_descriptors + blockIdx.x), make_uint2(descriptor_status_partial_aggregate_ready, warp_inclusive_suffix_index));
}
{
uint4 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint2 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>((uint2 *)descriptors_lookback);
} while (__any_sync((uint32_t)-1, alternate_block_descriptor_statuses
? ((int32_t )block_descriptor.x >= (int32_t )descriptor_status_aggregate_not_ready)
: ((uint32_t)block_descriptor.x <= (uint32_t)descriptor_status_aggregate_not_ready)));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.y = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.y : 0;
}
{
block_exclusive_suffix_index = cub::Sum()(block_exclusive_suffix_index, libcubwt_warp_reduce_sum(block_descriptor.y));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_index = cub::Sum()(warp_inclusive_suffix_index, block_exclusive_suffix_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>((uint2 *)(device_descriptors + blockIdx.x), make_uint2(descriptor_status_full_aggregate_ready, warp_inclusive_suffix_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state2[0] = block_exclusive_suffix_index;
}
warp_state2[1 + threadIdx.x] = warp_inclusive_suffix_index;
}
}
__syncthreads();
}
{
if (thread_suffix_index[3] > 0)
{
uint32_t thread_suffix_new_rank[4];
uint32_t warp_exclusive_state = warp_state2[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_index = cub::Sum()(thread_exclusive_suffix_index, warp_exclusive_state);
thread_suffix_index[0] = cub::Sum()(thread_suffix_index[0], thread_exclusive_suffix_index);
thread_suffix_index[1] = cub::Sum()(thread_suffix_index[1], thread_exclusive_suffix_index);
thread_suffix_index[2] = cub::Sum()(thread_suffix_index[2], thread_exclusive_suffix_index);
thread_suffix_index[3] = cub::Sum()(thread_suffix_index[3], thread_exclusive_suffix_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
thread_suffix_new_rank[0] = (thread_new_heads.x != 0) ? (thread_index + 0) : thread_exclusive_suffix_new_rank;
thread_suffix_new_rank[1] = (thread_new_heads.y != 0) ? (thread_index + 1) : thread_suffix_new_rank[0];
thread_suffix_new_rank[2] = (thread_new_heads.z != 0) ? (thread_index + 2) : thread_suffix_new_rank[1];
thread_suffix_new_rank[3] = (thread_new_heads.w != 0) ? (thread_index + 3) : thread_suffix_new_rank[2];
if (thread_exclusive_suffix_index != thread_suffix_index[0]) { device_out_SA[thread_suffix_index[0]] = indexes.x; device_out_ISA[thread_suffix_index[0]] = thread_suffix_new_rank[0]; }
if (thread_suffix_index[0] != thread_suffix_index[1]) { device_out_SA[thread_suffix_index[1]] = indexes.y; device_out_ISA[thread_suffix_index[1]] = thread_suffix_new_rank[1]; }
if (thread_suffix_index[1] != thread_suffix_index[2]) { device_out_SA[thread_suffix_index[2]] = indexes.z; device_out_ISA[thread_suffix_index[2]] = thread_suffix_new_rank[2]; }
if (thread_suffix_index[2] != thread_suffix_index[3]) { device_out_SA[thread_suffix_index[3]] = indexes.w; device_out_ISA[thread_suffix_index[3]] = thread_suffix_new_rank[3]; }
}
}
}
}
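// Host driver for one rank-and-segment pass. Iteration 0 launches the initialization and
// initiatory kernels; later iterations snapshot the descriptors and launch the incremental
// kernel (alternating descriptor status encodings between odd and even iterations). The last
// block descriptor is read back to obtain the number of unsorted segments and, on the indirect
// path, the number of rank updates that still have to be scattered into ISA.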
static cudaError_t libcubwt_rank_and_segment_suffixes(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n, int64_t iteration)
{
cudaError_t status = cudaSuccess;
int64_t n_segmentation_blocks = 1 + (n / (storage->cuda_block_threads * 4));
int64_t n_initialization_blocks = (n_segmentation_blocks + storage->cuda_block_threads - 1) / storage->cuda_block_threads;
bool scatter_ranks_directly = (n <= ((int64_t)1 << (storage->device_L2_cache_bits - 3)));
if (iteration == 0)
{
libcubwt_rank_and_segment_suffixes_initialization_kernel<<<(uint32_t)n_initialization_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_descriptors_large,
storage->device_descriptors_small,
(uint32_t)n);
if (scatter_ranks_directly)
{
libcubwt_rank_and_segment_suffixes_initiatory_kernel<true><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large);
}
else
{
libcubwt_rank_and_segment_suffixes_initiatory_kernel<false><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
NULL,
storage->device_keys_temp_keys,
storage->device_heads,
storage->device_temp_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large);
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_large[n_segmentation_blocks - 1], sizeof(uint4), cudaMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(storage->cuda_stream), status);
if (status == cudaSuccess)
{
storage->num_unsorted_segments = ((uint4 *)storage->host_pinned_storage)->w;
if (!scatter_ranks_directly)
{
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->device_temp_SA, storage->device_SA, n * sizeof(uint32_t), cudaMemcpyDeviceToDevice, storage->cuda_stream))) == cudaSuccess)
{
status = libcubwt_permute_values_uint32(storage, storage->device_temp_SA, storage->device_temp_ISA, storage->device_ISA, n, storage->device_keys, storage->device_temp_keys);
}
}
}
}
else
{
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->device_descriptors_copy - 1, storage->device_descriptors_large - 1, n_segmentation_blocks * sizeof(uint4), cudaMemcpyDeviceToDevice, storage->cuda_stream))) == cudaSuccess)
{
if (scatter_ranks_directly)
{
if ((iteration % 2) == 0)
{
libcubwt_rank_and_segment_suffixes_incremental_kernel<false, true><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys,
storage->device_heads,
NULL, storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
else
{
libcubwt_rank_and_segment_suffixes_incremental_kernel<true, true><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys,
storage->device_heads,
NULL, storage->device_ISA,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
}
else
{
if ((iteration % 2) == 0)
{
libcubwt_rank_and_segment_suffixes_incremental_kernel<false, false><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys,
storage->device_heads,
storage->device_temp_SA - 1, storage->device_temp_ISA - 1,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
else
{
libcubwt_rank_and_segment_suffixes_incremental_kernel<true, false><<<(uint32_t)n_segmentation_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_SA,
storage->device_keys,
storage->device_heads,
storage->device_temp_SA - 1, storage->device_temp_ISA - 1,
storage->device_offsets - 1, storage->device_offsets + (n / 2) - 1,
storage->device_descriptors_large, storage->device_descriptors_copy);
}
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_large[n_segmentation_blocks - 1], sizeof(uint4), cudaMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(storage->cuda_stream), status);
if (status == cudaSuccess)
{
storage->num_unsorted_segments = ((uint4 *)storage->host_pinned_storage)->w;
if (!scatter_ranks_directly)
{
uint32_t num_updated_suffixes = ((uint4 *)storage->host_pinned_storage)->y;
if (num_updated_suffixes > 0)
{
status = libcubwt_scatter_values_uint32(storage, storage->device_temp_SA, storage->device_temp_ISA, storage->device_ISA, num_updated_suffixes, n, storage->device_keys, storage->device_temp_keys);
}
}
}
}
}
return status;
}
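// Gathers the suffixes that still sit inside unsorted segments (their head flags show they
// belong to a group of size greater than one) and compacts their positions and SA entries with
// a block-wide prefix sum plus decoupled look-back across blocks.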
template <bool alternate_block_descriptor_statuses>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_gather_unsorted_suffixes_kernel(
const uint8_t * RESTRICT device_heads,
const uint32_t * RESTRICT device_SA,
uint32_t * RESTRICT device_out_keys,
uint32_t * RESTRICT device_out_SA,
uint2 * RESTRICT device_descriptors)
{
__shared__ __align__(32) uint32_t warp_state[1 + CUDA_WARP_THREADS];
uint32_t thread_exclusive_suffix_index;
uint32_t thread_suffix_index[4];
{
device_heads += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uchar4 current_heads = __ldg((uchar4 *)(device_heads));
const uint8_t next_head = current_heads.w > 0 ? __ldg(device_heads + 4) : 0;
thread_suffix_index[0] = (current_heads.x + current_heads.y < 2);
thread_suffix_index[1] = thread_suffix_index[0] + (current_heads.y + current_heads.z < 2);
thread_suffix_index[2] = thread_suffix_index[1] + (current_heads.z + current_heads.w < 2);
thread_suffix_index[3] = thread_suffix_index[2] + (current_heads.w + next_head < 2);
}
{
uint32_t thread_inclusive_suffix_index;
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage[CUDA_WARP_THREADS];
WarpScan(warp_scan_storage[threadIdx.x / CUDA_WARP_THREADS]).Scan(thread_suffix_index[3], thread_inclusive_suffix_index, thread_exclusive_suffix_index, (uint32_t)0, cub::Sum());
if ((threadIdx.x % CUDA_WARP_THREADS) == (CUDA_WARP_THREADS - 1))
{
warp_state[threadIdx.x / CUDA_WARP_THREADS] = thread_inclusive_suffix_index;
}
__syncthreads();
}
{
if (threadIdx.x < CUDA_WARP_THREADS)
{
uint32_t block_exclusive_suffix_index = 0;
uint32_t warp_inclusive_suffix_index;
{
typedef cub::WarpScan<uint32_t> WarpScan;
__shared__ typename WarpScan::TempStorage warp_scan_storage;
uint32_t warp_inclusive_state = warp_state[threadIdx.x];
WarpScan(warp_scan_storage).InclusiveScan(warp_inclusive_state, warp_inclusive_suffix_index, cub::Sum());
}
{
const uint32_t descriptor_status_aggregate_not_ready = alternate_block_descriptor_statuses ? 2 : 0;
const uint32_t descriptor_status_partial_aggregate_ready = alternate_block_descriptor_statuses ? 1 : 1;
const uint32_t descriptor_status_full_aggregate_ready = alternate_block_descriptor_statuses ? 0 : 2;
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint2(descriptor_status_partial_aggregate_ready, warp_inclusive_suffix_index));
}
{
uint2 * RESTRICT descriptors_lookback = device_descriptors + blockIdx.x + threadIdx.x;
int32_t full_aggregate_lane, delay = 8;
do
{
descriptors_lookback -= CUDA_WARP_THREADS;
uint2 block_descriptor;
do
{
libcubwt_delay_or_prevent_hoisting(delay <<= 1);
block_descriptor = cub::ThreadLoad<cub::LOAD_CG>(descriptors_lookback);
} while (__any_sync((uint32_t)-1, block_descriptor.x == descriptor_status_aggregate_not_ready));
delay = 0;
{
full_aggregate_lane = 31 - __clz((int32_t)__ballot_sync((uint32_t)-1, block_descriptor.x != descriptor_status_partial_aggregate_ready));
block_descriptor.y = (((int32_t)threadIdx.x) >= full_aggregate_lane) ? block_descriptor.y : 0;
}
{
block_exclusive_suffix_index = cub::Sum()(block_exclusive_suffix_index, libcubwt_warp_reduce_sum(block_descriptor.y));
}
} while (full_aggregate_lane == -1);
warp_inclusive_suffix_index = cub::Sum()(warp_inclusive_suffix_index, block_exclusive_suffix_index);
}
if (threadIdx.x == ((CUDA_BLOCK_THREADS / CUDA_WARP_THREADS) - 1))
{
cub::ThreadStore<cub::STORE_CG>(device_descriptors + blockIdx.x, make_uint2(descriptor_status_full_aggregate_ready, warp_inclusive_suffix_index));
}
}
{
if (threadIdx.x == 0)
{
warp_state[0] = block_exclusive_suffix_index;
}
warp_state[1 + threadIdx.x] = warp_inclusive_suffix_index;
}
}
__syncthreads();
}
{
if (thread_suffix_index[3] > 0)
{
uint32_t warp_exclusive_state = warp_state[threadIdx.x / CUDA_WARP_THREADS];
thread_exclusive_suffix_index = cub::Sum()(thread_exclusive_suffix_index, warp_exclusive_state);
thread_suffix_index[0] = cub::Sum()(thread_suffix_index[0], thread_exclusive_suffix_index);
thread_suffix_index[1] = cub::Sum()(thread_suffix_index[1], thread_exclusive_suffix_index);
thread_suffix_index[2] = cub::Sum()(thread_suffix_index[2], thread_exclusive_suffix_index);
thread_suffix_index[3] = cub::Sum()(thread_suffix_index[3], thread_exclusive_suffix_index);
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
const uint4 indexes = __ldg((uint4 *)(device_SA + thread_index));
if (thread_exclusive_suffix_index != thread_suffix_index[0]) { device_out_keys[thread_suffix_index[0]] = thread_index + 0; device_out_SA[thread_suffix_index[0]] = indexes.x; }
if (thread_suffix_index[0] != thread_suffix_index[1]) { device_out_keys[thread_suffix_index[1]] = thread_index + 1; device_out_SA[thread_suffix_index[1]] = indexes.y; }
if (thread_suffix_index[1] != thread_suffix_index[2]) { device_out_keys[thread_suffix_index[2]] = thread_index + 2; device_out_SA[thread_suffix_index[2]] = indexes.z; }
if (thread_suffix_index[2] != thread_suffix_index[3]) { device_out_keys[thread_suffix_index[3]] = thread_index + 3; device_out_SA[thread_suffix_index[3]] = indexes.w; }
}
}
}
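// Rebuilds the sorting key of every suffix that is still inside an unsorted segment: the new
// key is the current rank of the suffix `depth` symbols ahead (the caller passes device_ISA
// already offset by depth); fully sorted positions are filled with sentinel values instead.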
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_update_suffix_sorting_keys_kernel(const uint8_t * RESTRICT device_heads, const uint32_t * RESTRICT device_SA, const uint32_t * RESTRICT device_ISA, uint32_t * RESTRICT device_keys)
{
const uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
device_heads += thread_index;
const uchar4 current_heads = __ldg((uchar4 *)(device_heads));
const uint8_t next_head = current_heads.w > 0 ? __ldg(device_heads + 4) : 0;
if (current_heads.x + current_heads.y + current_heads.z + current_heads.w + next_head < 5)
{
device_SA += thread_index; device_keys += thread_index;
const uint4 current_SA = __ldg((uint4 *)(device_SA));
((uint4 *)device_keys)[0] = make_uint4(
(current_heads.x + current_heads.y < 2) ? __ldg(device_ISA + current_SA.x) : (uint32_t)-1,
(current_heads.y + current_heads.z < 2) ? __ldg(device_ISA + current_SA.y) : (uint32_t)-2,
(current_heads.z + current_heads.w < 2) ? __ldg(device_ISA + current_SA.z) : (uint32_t)-3,
(current_heads.w + next_head < 2) ? __ldg(device_ISA + current_SA.w) : (uint32_t)-4);
}
}
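// Host driver for key updates: either refreshes the keys in place over the whole array, or
// gathers only the unsorted suffixes, updates their keys, and scatters them back, depending on
// the working-set size and how many suffixes remain unsorted.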
static cudaError_t libcubwt_update_suffix_sorting_keys(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n, int64_t iteration, int64_t depth)
{
cudaError_t status = cudaSuccess;
int64_t n_ranking_blocks = (n + storage->cuda_block_threads * 4 - 1) / (storage->cuda_block_threads * 4);
bool gather_keys_directly = (n <= ((int64_t)1 << (storage->device_L2_cache_bits - 2))) || (n > ((int64_t)1 << (storage->device_L2_cache_bits - 2 + 8)));
if (gather_keys_directly || (storage->num_unsorted_suffixes <= (n / 4)))
{
libcubwt_update_suffix_sorting_keys_kernel<<<(uint32_t)n_ranking_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(storage->device_heads, storage->device_SA, storage->device_ISA + depth, storage->device_keys);
}
else
{
if ((iteration % 2) == 0)
{
libcubwt_gather_unsorted_suffixes_kernel<false><<<(uint32_t)n_ranking_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_heads,
storage->device_SA,
storage->device_temp_keys - 1, storage->device_temp_SA - 1,
storage->device_descriptors_small);
}
else
{
libcubwt_gather_unsorted_suffixes_kernel<true><<<(uint32_t)n_ranking_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_heads,
storage->device_SA,
storage->device_temp_keys - 1, storage->device_temp_SA - 1,
storage->device_descriptors_small);
}
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->host_pinned_storage, &storage->device_descriptors_small[n_ranking_blocks - 1], sizeof(uint2), cudaMemcpyDeviceToHost, storage->cuda_stream));
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(storage->cuda_stream), status);
if (status == cudaSuccess)
{
storage->num_unsorted_suffixes = ((uint2 *)storage->host_pinned_storage)->y;
if (storage->num_unsorted_suffixes > 0)
{
status = libcubwt_gather_scatter_values_uint32(storage, storage->device_temp_SA, storage->device_ISA + depth, storage->device_temp_keys, storage->device_keys, storage->num_unsorted_suffixes, n, storage->device_temp_ISA, storage->device_keys);
}
}
}
return status;
}
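// Sorts the suffixes of every unsorted segment by their current rank keys with
// cub::DeviceSegmentedSort, then swaps the double buffers if the results landed in the
// alternate storage.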
static cudaError_t libcubwt_sort_segmented_suffixes_by_rank(LIBCUBWT_DEVICE_STORAGE * storage, int64_t n)
{
cub::DoubleBuffer<uint32_t> d_keys(storage->device_keys, storage->device_temp_keys);
cub::DoubleBuffer<uint32_t> d_values(storage->device_SA, storage->device_temp_SA);
cudaError_t status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceSegmentedSort::SortPairs(
storage->device_ssort_temp_storage, storage->device_ssort_temp_storage_size,
d_keys, d_values,
(int)storage->num_unsorted_suffixes, (int)storage->num_unsorted_segments,
storage->device_offsets, storage->device_offsets + (n / 2),
storage->cuda_stream));
if (d_keys.selector) { std::swap(storage->device_keys, storage->device_temp_keys); }
if (d_values.selector) { std::swap(storage->device_SA, storage->device_temp_SA); }
return status;
}
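// Packs each mod-1/mod-2 suffix into a 64-bit record pairing its rank with the following text
// bytes; the high byte can carry an auxiliary-index tag, and every second record is marked via
// the sign bit of its rank word so the merge can pick the matching mod-0 tie-breaker rank.
// Records are staged in shared memory and then written out coalesced.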
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_prepare_mod12_suffixes_kernel(const uint8_t * RESTRICT device_T, const uint32_t * RESTRICT device_ISA, const uint64_t * RESTRICT device_suffixes, const uint32_t rm, const uint32_t rs)
{
__shared__ union
{
struct
{
__align__(32) uint32_t bytes[4 * CUDA_BLOCK_THREADS];
__align__(32) uint4 ranks[3 * CUDA_BLOCK_THREADS];
} stage1;
struct
{
__align__(32) uint4 suffixes[4 * CUDA_BLOCK_THREADS];
} stage2;
} shared_storage;
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 16;
device_ISA += blockIdx.x * CUDA_BLOCK_THREADS * 8 + threadIdx.x * 4;
uint4 * RESTRICT thread_bytes = (uint4 *)shared_storage.stage1.bytes + threadIdx.x;
uint4 * RESTRICT thread_ranks = (uint4 *)shared_storage.stage1.ranks + threadIdx.x;
if (threadIdx.x < (12 * CUDA_BLOCK_THREADS + 4 + 15) / 16) { thread_bytes[0] = __ldg((uint4 *)device_T); }
thread_ranks[0] = __ldg((uint4 *)device_ISA); thread_ranks += CUDA_BLOCK_THREADS; device_ISA += CUDA_BLOCK_THREADS * 4;
thread_ranks[0] = __ldg((uint4 *)device_ISA); thread_ranks += CUDA_BLOCK_THREADS; device_ISA += CUDA_BLOCK_THREADS * 4;
if (threadIdx.x == 0) { thread_ranks[0] = __ldg((uint4 *)device_ISA); }
}
{
__syncthreads();
uint32_t bytes0 = shared_storage.stage1.bytes[threadIdx.x * 3 + 0];
uint32_t bytes1 = shared_storage.stage1.bytes[threadIdx.x * 3 + 1];
uint32_t bytes2 = shared_storage.stage1.bytes[threadIdx.x * 3 + 2];
uint32_t bytes3 = shared_storage.stage1.bytes[threadIdx.x * 3 + 3];
uint4 ranks0 = shared_storage.stage1.ranks[threadIdx.x * 2 + 0];
uint4 ranks1 = shared_storage.stage1.ranks[threadIdx.x * 2 + 1];
uint4 ranks2 = shared_storage.stage1.ranks[threadIdx.x * 2 + 2];
__syncthreads();
uint32_t v4 = 0, v8 = 0;
if (process_auxiliary_indexes)
{
const uint32_t i4 = blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 12 + 4 + rm + 1;
const uint32_t i8 = blockIdx.x * CUDA_BLOCK_THREADS * 12 + threadIdx.x * 12 + 8 + rm + 1;
if ((i4 & rm) == 0) { v4 = (i4 >> rs) << 24; }
if ((i8 & rm) == 0) { v8 = (i8 >> rs) << 24; }
}
shared_storage.stage2.suffixes[threadIdx.x * 4 + 0] = make_uint4
(
ranks0.y, __byte_perm(bytes0, 0, 0x4021),
ranks0.z | (uint32_t)INT32_MIN, __byte_perm(bytes0, 0, 0x4132)
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 1] = make_uint4
(
ranks0.w, (__byte_perm(bytes0, bytes1, 0x0354) & 0xffffffu) | v4,
ranks1.x | (uint32_t)INT32_MIN, __byte_perm(bytes1, 0, 0x4021)
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 2] = make_uint4
(
ranks1.y, __byte_perm(bytes1, bytes2, 0x0243) & 0xffffffu,
ranks1.z | (uint32_t)INT32_MIN, (__byte_perm(bytes1, bytes2, 0x0354) & 0xffffffu) | v8
);
shared_storage.stage2.suffixes[threadIdx.x * 4 + 3] = make_uint4
(
ranks1.w, __byte_perm(bytes2, 0, 0x4132),
ranks2.x | (uint32_t)INT32_MIN, __byte_perm(bytes2, bytes3, 0x0243) & 0xffffffu
);
__syncwarp();
}
{
device_suffixes += blockIdx.x * CUDA_BLOCK_THREADS * 8;
uint4 * RESTRICT thread_src = shared_storage.stage2.suffixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
uint4 * RESTRICT thread_dst = (uint4 *)device_suffixes + ((threadIdx.x / CUDA_WARP_THREADS) * CUDA_WARP_THREADS * 4) + (threadIdx.x % CUDA_WARP_THREADS);
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0]; thread_src += CUDA_WARP_THREADS; thread_dst += CUDA_WARP_THREADS;
thread_dst[0] = thread_src[0];
}
}
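// Builds the matching mod-0 records: a 64-bit (rank, text-bytes) part in device_suffixes_lh plus
// a second rank in device_suffixes_hh used as a tie-breaker during the merge; the high byte of
// the byte field can again carry an auxiliary-index tag (or, without auxiliary indexes, tags the
// suffix whose final position becomes the primary index).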
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_prepare_mod0_suffixes_kernel(const uint8_t * RESTRICT device_T, const uint32_t * RESTRICT device_ISA, const uint64_t * RESTRICT device_suffixes_lh, const uint32_t * RESTRICT device_suffixes_hh, const uint32_t rm, const uint32_t rs)
{
__shared__ __align__(32) uint16_t bytes[3 * CUDA_BLOCK_THREADS + 8];
{
device_T += blockIdx.x * CUDA_BLOCK_THREADS * 6 + threadIdx.x * 16;
uint4 * RESTRICT thread_bytes = (uint4 *)bytes + threadIdx.x;
if (threadIdx.x <= (6 * CUDA_BLOCK_THREADS) / 16) { thread_bytes[0] = __ldg((uint4 *)(device_T - 16)); }
}
{
device_ISA += blockIdx.x * CUDA_BLOCK_THREADS * 4 + threadIdx.x * 4;
device_suffixes_lh += blockIdx.x * CUDA_BLOCK_THREADS * 2 + threadIdx.x * 2;
device_suffixes_hh += blockIdx.x * CUDA_BLOCK_THREADS * 2 + threadIdx.x * 2;
__syncthreads();
uint32_t bytes0 = bytes[threadIdx.x * 3 + 7 ];
uint32_t bytes1 = bytes[threadIdx.x * 3 + 8 ];
uint32_t bytes2 = bytes[threadIdx.x * 3 + 9 ];
uint32_t bytes3 = bytes[threadIdx.x * 3 + 10];
uint4 ranks = __ldg((uint4 *)(device_ISA));
uint32_t v0 = 0;
if (process_auxiliary_indexes)
{
const uint32_t i0 = blockIdx.x * CUDA_BLOCK_THREADS * 6 + threadIdx.x * 6 + 0 + rm + 1;
if ((i0 & rm) == 0) { v0 = (i0 >> rs) << 24; }
}
else if ((blockIdx.x | threadIdx.x) == 0)
{
v0 = 1u << 24;
}
*(uint4 *)(device_suffixes_lh) = make_uint4
(
ranks.x, __byte_perm(bytes0, bytes1, 0x3154) | v0,
ranks.z, __byte_perm(bytes2, bytes3, 0x3041)
);
*(uint2 *)(device_suffixes_hh) = make_uint2(ranks.y | (uint32_t)INT32_MIN, ranks.w | (uint32_t)INT32_MIN);
}
}
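// Pads both suffix streams with large sentinel records past their real ends so the merge
// kernels can process fixed-size tiles without bounds checks.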
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, 1)
static void libcubwt_set_sentinel_suffixes_kernel(uint64_t * RESTRICT device_mod0l_suffixes_end, uint32_t * RESTRICT device_mod0h_suffixes_end,uint64_t * RESTRICT device_mod12_suffixes_end)
{
uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
device_mod0l_suffixes_end += thread_index;
device_mod0h_suffixes_end += thread_index;
device_mod12_suffixes_end += thread_index;
*(uint2 *)(device_mod0l_suffixes_end) = make_uint2(0x7fffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 0, 0x00ffffffu);
*(uint32_t *)(device_mod0h_suffixes_end) = (uint32_t)(0xffffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 0 );
*(uint2 *)(device_mod12_suffixes_end) = make_uint2(0x7fffffffu - 12 * CUDA_BLOCK_THREADS + 2 * thread_index + 1, 0x00ffffffu);
}
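// Orders a mod-0 suffix against a mod-1/2 suffix: compare the packed text bytes first, then
// fall back to the rank words (choosing the mod-0 high-half rank when the mod-1/2 record's
// sign bit says so).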
__device__ __forceinline__
bool libcubwt_compare_suffixes_kernel(const uint2 mod0l_suffix, const uint32_t mod0h_suffix, const uint2 mod12_suffix)
{
uint32_t difference = __byte_perm(mod0l_suffix.y, 0, 0x4401) - __byte_perm(mod12_suffix.y, 0, 0x4401);
if (difference == 0) { difference = (((int32_t)mod12_suffix.x < 0) ? mod0h_suffix : mod0l_suffix.x) - mod12_suffix.x; }
return (int32_t)difference <= 0;
}
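// Merge-path partitioning: binary-search the merge diagonal for each output tile so that every
// merging block consumes balanced slices of the mod-0 and mod-1/2 sequences.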
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_suffixes_merge_initialization_kernel(
const uint64_t * RESTRICT device_mod0l_suffixes,
const uint32_t * RESTRICT device_mod0h_suffixes,
const uint32_t num_mod0_suffixes,
const uint64_t * RESTRICT device_mod12_suffixes,
const uint32_t num_mod12_suffixes,
uint32_t * RESTRICT device_suffixes_merge_path,
uint32_t num_merging_blocks)
{
uint32_t thread_index = blockIdx.x * CUDA_BLOCK_THREADS + threadIdx.x;
if (thread_index <= num_merging_blocks)
{
uint32_t diagonal = thread_index * CUDA_BLOCK_THREADS * 5;
uint32_t begin = (diagonal > num_mod12_suffixes) ? (diagonal - num_mod12_suffixes) : 0;
uint32_t end = (diagonal > num_mod0_suffixes ) ? (num_mod0_suffixes ) : diagonal;
while (begin < end)
{
uint32_t pivot = begin + ((end - begin) >> 1);
bool predicate = libcubwt_compare_suffixes_kernel(
__ldg((uint2 *)(device_mod0l_suffixes + pivot)),
__ldg((uint32_t *)(device_mod0h_suffixes + pivot)),
__ldg((uint2 *)(device_mod12_suffixes + diagonal - pivot - 1)));
begin = predicate ? (pivot + 1) : begin;
end = predicate ? (end ) : pivot;
}
__syncwarp();
device_suffixes_merge_path[thread_index] = begin;
}
}
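// Final merge: each block loads its two slices into shared memory, merge-path splits them per
// thread, emits one BWT byte per merged suffix (extracted from bits 16..23 of the packed
// record), and records primary/auxiliary indexes when a tagged record is output.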
template <bool process_auxiliary_indexes>
__global__ __launch_bounds__(CUDA_BLOCK_THREADS, CUDA_SM_THREADS / CUDA_BLOCK_THREADS)
static void libcubwt_merge_suffixes_kernel(
const uint64_t * RESTRICT device_mod0l_suffixes,
const uint32_t * RESTRICT device_mod0h_suffixes,
const uint64_t * RESTRICT device_mod12_suffixes,
const uint32_t * RESTRICT device_suffixes_merge_path,
uint32_t * RESTRICT device_auxiliary_indexes,
uint8_t * RESTRICT device_L)
{
__shared__ union
{
struct
{
__align__(32) uint2 suffixes_l[CUDA_BLOCK_THREADS * 5 + 12];
__align__(32) uint32_t suffixes_h[CUDA_BLOCK_THREADS * 5 + 12];
} stage1;
struct
{
__align__(32) uint8_t bwt[CUDA_BLOCK_THREADS * 5];
} stage2;
} shared_storage;
uint32_t num_mod0_suffixes;
uint32_t num_mod12_suffixes;
{
const uint32_t block_mod0_path_begin = (device_suffixes_merge_path + blockIdx.x)[0];
const uint32_t block_mod0_path_end = (device_suffixes_merge_path + blockIdx.x)[1];
num_mod0_suffixes = block_mod0_path_end - block_mod0_path_begin + 6;
num_mod12_suffixes = CUDA_BLOCK_THREADS * 5 + 12 - num_mod0_suffixes;
device_mod0l_suffixes += block_mod0_path_begin;
device_mod0h_suffixes += block_mod0_path_begin;
device_mod12_suffixes += (blockIdx.x * CUDA_BLOCK_THREADS * 5 - block_mod0_path_begin);
device_mod12_suffixes -= num_mod0_suffixes;
#pragma unroll
for (uint32_t thread_index = threadIdx.x; thread_index < CUDA_BLOCK_THREADS * 5 + 12; thread_index += CUDA_BLOCK_THREADS)
{
if (thread_index < num_mod0_suffixes) { shared_storage.stage1.suffixes_h[thread_index] = __ldg(device_mod0h_suffixes + thread_index); }
shared_storage.stage1.suffixes_l[thread_index] = __ldg((uint2 *)(thread_index < num_mod0_suffixes ? device_mod0l_suffixes : device_mod12_suffixes) + thread_index);
}
__syncthreads();
}
{
uint32_t diagonal = threadIdx.x * 5;
uint32_t begin = (diagonal > num_mod12_suffixes) ? (diagonal - num_mod12_suffixes) : 0;
uint32_t end = (diagonal > num_mod0_suffixes ) ? (num_mod0_suffixes ) : diagonal;
while (begin < end)
{
uint32_t pivot = (begin + end) >> 1;
bool predicate = libcubwt_compare_suffixes_kernel(
shared_storage.stage1.suffixes_l[pivot],
shared_storage.stage1.suffixes_h[pivot],
shared_storage.stage1.suffixes_l[num_mod0_suffixes + diagonal - pivot - 1]);
begin = predicate ? (pivot + 1) : begin;
end = predicate ? (end ) : pivot;
}
__syncwarp();
uint32_t suffixes[5];
{
uint32_t mod0_index = begin;
uint32_t mod12_index = num_mod0_suffixes + diagonal - begin;
uint2 mod0l_suffix = shared_storage.stage1.suffixes_l[mod0_index];
uint32_t mod0h_suffix = shared_storage.stage1.suffixes_h[mod0_index];
uint2 mod12_suffix = shared_storage.stage1.suffixes_l[mod12_index];
#pragma unroll
for (uint32_t item = 0; item < 5; ++item)
{
bool predicate = libcubwt_compare_suffixes_kernel(mod0l_suffix, mod0h_suffix, mod12_suffix);
suffixes[item] = predicate ? mod0l_suffix.y : mod12_suffix.y;
if ( predicate) { mod0_index += 1; mod0l_suffix = shared_storage.stage1.suffixes_l[mod0_index]; mod0h_suffix = shared_storage.stage1.suffixes_h[mod0_index]; }
if (!predicate) { mod12_index += 1; mod12_suffix = shared_storage.stage1.suffixes_l[mod12_index]; }
}
__syncthreads();
}
{
#pragma unroll
for (uint32_t item = 0; item < 5; ++item)
{
if (suffixes[item] >= 0x01000000u)
{
device_auxiliary_indexes[process_auxiliary_indexes ? suffixes[item] >> 24 : 1] = blockIdx.x * CUDA_BLOCK_THREADS * 5 + diagonal + item;
}
shared_storage.stage2.bwt[diagonal + item] = (uint8_t)(suffixes[item] >> 16);
}
__syncthreads();
}
}
{
device_L += blockIdx.x * CUDA_BLOCK_THREADS * 5 + threadIdx.x * 16;
if (threadIdx.x < (CUDA_BLOCK_THREADS * 5 / 16)) { ((uint4 *)device_L)[0] = ((uint4 *)shared_storage.stage2.bwt)[threadIdx.x]; }
}
}
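// Top-level BWT pipeline: sort the reduced (mod-1/2) suffixes by iterated rank-and-segment
// passes plus segmented sorts, then prepare and radix-sort the mod-0 suffixes, append
// sentinels, merge-path partition, and merge both streams while writing the BWT into device_T;
// finally the primary/auxiliary indexes are copied back to the host and converted to 1-based.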
static cudaError_t libcubwt_compute_burrows_wheeler_transform(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, int64_t input_n, int64_t r, uint32_t * I)
{
cudaError_t status = cudaSuccess;
int64_t reduced_n = (input_n / 3) * 2 + 2;
int64_t expanded_n = (reduced_n / 2) * 3 + 0;
int64_t num_indexes = (input_n + r - 1) / r;
if ((status = libcubwt_initialize_device_arrays(storage, T, reduced_n, expanded_n, input_n)) == cudaSuccess)
{
status = libcubwt_sort_suffixes_by_prefix(storage, reduced_n);
}
if (status == cudaSuccess)
{
for (int64_t iteration = 0, depth = 4; true; iteration += 1, depth *= 2)
{
if ((status = libcubwt_rank_and_segment_suffixes(storage, reduced_n, iteration)) != cudaSuccess)
{
break;
}
if (storage->num_unsorted_segments == 0)
{
break;
}
if ((status = libcubwt_update_suffix_sorting_keys(storage, reduced_n, iteration, depth)) != cudaSuccess)
{
break;
}
if ((status = libcubwt_sort_segmented_suffixes_by_rank(storage, reduced_n)) != cudaSuccess)
{
break;
}
}
}
if (status == cudaSuccess)
{
int64_t num_mod0_suffixes = (input_n / 3) * 1 + ((input_n % 3) != 0);
int64_t num_mod12_suffixes = (input_n / 3) * 2 + ((input_n % 3) == 2);
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(storage->device_temp_ISA, storage->device_ISA, reduced_n * sizeof(uint32_t), cudaMemcpyDeviceToDevice, storage->cuda_stream))) == cudaSuccess)
{
cub::DoubleBuffer<uint64_t> db_mod12_suffixes(storage->device_keys_temp_keys, storage->device_SA_temp_SA);
if (status == cudaSuccess)
{
{
int64_t n_preparing_blocks = (num_mod12_suffixes + storage->cuda_block_threads * 8 - 1) / (storage->cuda_block_threads * 8);
if (num_indexes > 1)
{
uint32_t rm = (uint32_t)(r - 1), rs = 0; while (rm >= ((uint32_t)1 << rs)) { rs += 1; }
libcubwt_prepare_mod12_suffixes_kernel<true><<<(uint32_t)n_preparing_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_T, storage->device_ISA,
db_mod12_suffixes.Current(),
rm, rs);
}
else
{
libcubwt_prepare_mod12_suffixes_kernel<false><<<(uint32_t)n_preparing_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_T, storage->device_ISA,
db_mod12_suffixes.Current(),
0, 0);
}
}
{
cub::DoubleBuffer<uint32_t> db_index(storage->device_ISA, storage->device_offsets);
status = libcubwt_scatter_values_uint64(storage, db_index, db_mod12_suffixes, num_mod12_suffixes, reduced_n, reduced_n - num_mod12_suffixes);
}
}
cub::DoubleBuffer<uint32_t> db_mod0h_suffixes(storage->device_ISA, storage->device_offsets);
cub::DoubleBuffer<uint64_t> db_mod0l_suffixes = db_mod12_suffixes.Current() == storage->device_keys_temp_keys
? cub::DoubleBuffer<uint64_t>((uint64_t *)storage->device_SA, (uint64_t *)storage->device_temp_SA)
: cub::DoubleBuffer<uint64_t>((uint64_t *)storage->device_keys, (uint64_t *)storage->device_temp_keys);
if (status == cudaSuccess)
{
{
int64_t n_preparing_blocks = (num_mod0_suffixes + storage->cuda_block_threads * 2 - 1) / (storage->cuda_block_threads * 2);
if (num_indexes > 1)
{
uint32_t rm = (uint32_t)(r - 1), rs = 0; while (rm >= ((uint32_t)1 << rs)) { rs += 1; }
libcubwt_prepare_mod0_suffixes_kernel<true><<<(uint32_t)n_preparing_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_T, storage->device_temp_ISA,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(),
rm, rs);
}
else
{
libcubwt_prepare_mod0_suffixes_kernel<false><<<(uint32_t)n_preparing_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
storage->device_T, storage->device_temp_ISA,
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(),
0, 0);
}
}
if (reduced_n <= (1 << 24))
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
0, 24,
storage->cuda_stream));
if (status == cudaSuccess)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
32, 40,
storage->cuda_stream));
}
}
else
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(
storage->device_rsort_temp_storage, storage->device_rsort_temp_storage_size,
db_mod0l_suffixes, db_mod0h_suffixes,
(uint32_t)num_mod0_suffixes,
0, 40,
storage->cuda_stream));
}
}
if (status == cudaSuccess)
{
int64_t n_merging_blocks = (input_n + storage->cuda_block_threads * 5 - 1) / (storage->cuda_block_threads * 5);
{
libcubwt_set_sentinel_suffixes_kernel<<<6, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
db_mod0l_suffixes.Current() + num_mod0_suffixes,
db_mod0h_suffixes.Current() + num_mod0_suffixes,
db_mod12_suffixes.Current() + num_mod12_suffixes);
}
{
int64_t n_merge_initialization_blocks = 1 + (n_merging_blocks / storage->cuda_block_threads);
libcubwt_suffixes_merge_initialization_kernel<<<(uint32_t)n_merge_initialization_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), (uint32_t)(num_mod0_suffixes + 6 * storage->cuda_block_threads),
db_mod12_suffixes.Current(), (uint32_t)(num_mod12_suffixes + 6 * storage->cuda_block_threads),
(uint32_t *)storage->device_descriptors_large, (uint32_t)n_merging_blocks);
}
{
if (num_indexes > 1)
{
libcubwt_merge_suffixes_kernel<true><<<(uint32_t)n_merging_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), db_mod12_suffixes.Current(),
(uint32_t *)storage->device_descriptors_large,
(uint32_t *)storage->device_descriptors_small - 1,
storage->device_T);
}
else
{
libcubwt_merge_suffixes_kernel<false><<<(uint32_t)n_merging_blocks, storage->cuda_block_threads, 0, storage->cuda_stream>>>(
db_mod0l_suffixes.Current(), db_mod0h_suffixes.Current(), db_mod12_suffixes.Current(),
(uint32_t *)storage->device_descriptors_large,
(uint32_t *)storage->device_descriptors_small - 1,
storage->device_T);
}
}
}
if (status == cudaSuccess)
{
uint32_t * buffer = ((sizeof(uint32_t) * num_indexes) <= storage->host_pinned_storage_size) ? (uint32_t *)storage->host_pinned_storage : I;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(buffer, storage->device_descriptors_small, sizeof(uint32_t) * num_indexes, cudaMemcpyDeviceToHost, storage->cuda_stream), status);
if ((status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(storage->cuda_stream), status)) == cudaSuccess)
{
if (I != buffer) { memcpy(I, buffer, sizeof(uint32_t) * num_indexes); }
for (int64_t index = 0; index < num_indexes; index += 1) { I[index] += 1; }
}
}
}
}
return status;
}
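// Copies the BWT back to host memory: L[0] is the last character of T, and the device bytes
// are copied in two pieces around the primary-index position.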
static cudaError_t libcubwt_copy_burrows_wheeler_transform(LIBCUBWT_DEVICE_STORAGE * storage, const uint8_t * T, uint8_t * L, int64_t input_n, int64_t index)
{
cudaError_t status = cudaSuccess;
L[0] = T[input_n - 1];
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(L + 1, storage->device_T, (size_t)(index - 1), cudaMemcpyDeviceToHost, storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(L + index, storage->device_T + index, (size_t)(input_n - index), cudaMemcpyDeviceToHost, storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(storage->cuda_stream), status);
return status;
}
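// Allocates a single device buffer and carves it into sort scratch space, the text, head flags,
// key/SA/ISA double buffers and block descriptors. The block size (512 or 768 threads) and the
// L2-cache-derived thresholds are chosen from the device properties queried here.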
int64_t libcubwt_allocate_device_storage(void ** device_storage, int64_t max_length)
{
int64_t max_reduced_length = ((max_length / 3) * 2 + 2 + 1023) & (-1024);
int64_t max_expanded_length = ((max_reduced_length / 2) * 3 + 0 + 1023) & (-1024);
if ((device_storage == NULL) || (max_expanded_length >= INT32_MAX))
{
return LIBCUBWT_BAD_PARAMETER;
}
*device_storage = NULL;
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)malloc(sizeof(LIBCUBWT_DEVICE_STORAGE));
if (storage != NULL)
{
memset(storage, 0, sizeof(LIBCUBWT_DEVICE_STORAGE));
cudaError_t status = cudaSuccess;
{
int32_t cuda_device_ordinal;
int32_t cuda_device_L2_cache_size;
int32_t cuda_device_capability;
libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaGetDevice(&cuda_device_ordinal), status);
libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaDeviceGetAttribute(&cuda_device_L2_cache_size, cudaDevAttrL2CacheSize, cuda_device_ordinal), status);
libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::PtxVersion(cuda_device_capability, cuda_device_ordinal), status);
if (status == cudaSuccess)
{
storage->device_L2_cache_bits = 0; while (cuda_device_L2_cache_size >>= 1) { storage->device_L2_cache_bits += 1; };
storage->cuda_block_threads = (cuda_device_capability == 860 || cuda_device_capability == 870 || cuda_device_capability == 890) ? 768 : 512;
}
}
if (status == cudaSuccess)
{
int64_t num_descriptors = ((max_reduced_length / (storage->cuda_block_threads * 4)) + 1024) & (-1024);
{
cub::DoubleBuffer<uint8_t> uint8_db;
cub::DoubleBuffer<uint32_t> uint32_db;
cub::DoubleBuffer<uint64_t> uint64_db;
size_t temp_radix_segmented_sort_k32v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceSegmentedSort::SortPairs(NULL, temp_radix_segmented_sort_k32v32, uint32_db, uint32_db, (int)max_reduced_length, (int)max_reduced_length / 2, uint32_db.Current(), uint32_db.Current()), status);
size_t temp_radix_sort_k32v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k32v32, uint32_db, uint32_db, (uint32_t)max_reduced_length), status);
size_t temp_radix_sort_k64v32 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k64v32, uint64_db, uint32_db, (uint32_t)max_reduced_length), status);
size_t temp_radix_sort_k32v64 = 0;
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(NULL, temp_radix_sort_k32v64, uint32_db, uint64_db, (uint32_t)max_reduced_length), status);
storage->device_ssort_temp_storage_size = std::max(temp_radix_segmented_sort_k32v32, (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint32_t));
storage->device_rsort_temp_storage_size = std::max(std::max(temp_radix_sort_k32v32, temp_radix_sort_k64v32), temp_radix_sort_k32v64);
storage->device_ssort_temp_storage_size = (storage->device_ssort_temp_storage_size + (size_t)1023) & (size_t)(-1024);
storage->device_rsort_temp_storage_size = (storage->device_rsort_temp_storage_size + (size_t)1023) & (size_t)(-1024);
}
if (status == cudaSuccess)
{
size_t device_storage_size = 0;
device_storage_size += storage->device_ssort_temp_storage_size;
device_storage_size += storage->device_rsort_temp_storage_size;
device_storage_size += (max_expanded_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
device_storage_size += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint2);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMalloc((void **)&storage->device_storage, device_storage_size), status);
if (status == cudaSuccess)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaMallocHost((void **)&storage->host_pinned_storage, storage->host_pinned_storage_size = 256 * sizeof(uint32_t)), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamCreate(&storage->cuda_stream), status);
}
}
if (status == cudaSuccess)
{
uint8_t * device_alloc = (uint8_t *)storage->device_storage;
storage->device_ssort_temp_storage = (void *)device_alloc; device_alloc += storage->device_ssort_temp_storage_size;
storage->device_rsort_temp_storage = (void *)device_alloc; device_alloc += storage->device_rsort_temp_storage_size;
storage->device_T = (uint8_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_expanded_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
storage->device_heads = (uint8_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint8_t);
storage->device_SA_temp_SA = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_keys_temp_keys = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_offsets_ISA = (uint64_t *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (max_reduced_length + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint64_t);
storage->device_descriptors_large = (uint4 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
storage->device_descriptors_copy = (uint4 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint4);
storage->device_descriptors_small = (uint2 *)(void *)device_alloc + CUDA_DEVICE_PADDING; device_alloc += (num_descriptors + (int64_t)2 * CUDA_DEVICE_PADDING) * sizeof(uint2);
storage->device_temp_ISA = (uint32_t *)(void *)storage->device_ssort_temp_storage + CUDA_DEVICE_PADDING;
storage->device_SA = (uint32_t *)(void *)(storage->device_SA_temp_SA - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_keys = (uint32_t *)(void *)(storage->device_keys_temp_keys - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_offsets = (uint32_t *)(void *)(storage->device_offsets_ISA - CUDA_DEVICE_PADDING) + 1 * CUDA_DEVICE_PADDING;
storage->device_temp_SA = (uint32_t *)(void *)(storage->device_SA_temp_SA - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->device_temp_keys = (uint32_t *)(void *)(storage->device_keys_temp_keys - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->device_ISA = (uint32_t *)(void *)(storage->device_offsets_ISA - CUDA_DEVICE_PADDING) + 3 * CUDA_DEVICE_PADDING + max_reduced_length;
storage->max_length = max_length;
*device_storage = storage;
return LIBCUBWT_NO_ERROR;
}
}
libcubwt_free_device_storage(storage);
return libcubwt_get_error_code(status);
}
return LIBCUBWT_NOT_ENOUGH_MEMORY;
}
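// Destroys the stream and releases the pinned host buffer and device allocation.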
int64_t libcubwt_free_device_storage(void * device_storage)
{
cudaError_t status = cudaSuccess;
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if (storage != NULL)
{
if (storage->device_storage != NULL)
{
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaStreamDestroy(storage->cuda_stream), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaFreeHost((void *)storage->host_pinned_storage), status);
status = libcubwt_cuda_safe_call(__FILE__, __LINE__, cudaFree((void *)storage->device_storage), status);
}
free(storage);
}
return status != cudaSuccess ? libcubwt_get_error_code(status) : LIBCUBWT_NO_ERROR;
}
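// Public API: computes the BWT of T (16 <= n <= max_length) into L and returns the 1-based
// primary index, or an error code on failure.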
int64_t libcubwt_bwt(void * device_storage, const uint8_t * T, uint8_t * L, int64_t n)
{
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if ((storage == NULL) || (T == NULL) || (L == NULL) || (n < 16) || (n > storage->max_length))
{
return LIBCUBWT_BAD_PARAMETER;
}
cudaError_t status; uint32_t index;
if ((status = libcubwt_compute_burrows_wheeler_transform(storage, T, n, n, &index)) == cudaSuccess &&
(status = libcubwt_copy_burrows_wheeler_transform(storage, T, L, n, index)) == cudaSuccess)
{
return index;
}
return libcubwt_get_error_code(status);
}
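// Public API with auxiliary indexes: r must be a power of two, at least 4, and chosen so that
// no more than 255 indexes are produced; I receives one 1-based index per r-sized block.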
int64_t libcubwt_bwt_aux(void * device_storage, const uint8_t * T, uint8_t * L, int64_t n, int64_t r, uint32_t * I)
{
LIBCUBWT_DEVICE_STORAGE * storage = (LIBCUBWT_DEVICE_STORAGE *)device_storage;
if ((storage == NULL) || (T == NULL) || (L == NULL) || (n < 16) || (n > storage->max_length) || (r < 4) || ((r & (r - 1)) != 0) || ((n + r - 1) / r > 255) || (I == NULL))
{
return LIBCUBWT_BAD_PARAMETER;
}
cudaError_t status;
if ((status = libcubwt_compute_burrows_wheeler_transform(storage, T, n, r, I)) == cudaSuccess &&
(status = libcubwt_copy_burrows_wheeler_transform(storage, T, L, n, I[0])) == cudaSuccess)
{
return LIBCUBWT_NO_ERROR;
}
return libcubwt_get_error_code(status);
}
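// Usage sketch (not part of the library build, hence the #if 0 guard): illustrates the intended
// call sequence of the public API above. The helper name and error handling are illustrative
// assumptions, not part of libcubwt itself.
#if 0
static int64_t example_compute_bwt(const uint8_t * T, uint8_t * L, int64_t n)
{
    void * device_storage = NULL;

    // One device-storage allocation can be reused for many inputs up to max_length.
    int64_t rc = libcubwt_allocate_device_storage(&device_storage, n);
    if (rc != LIBCUBWT_NO_ERROR) { return rc; }

    // Returns the 1-based primary index on success, or an error code on failure
    // (n must be at least 16 and no larger than the allocated maximum).
    int64_t primary_index = libcubwt_bwt(device_storage, T, L, n);

    libcubwt_free_device_storage(device_storage);
    return primary_index;
}
#endif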
// 1fa6e9403a6e2a65e55507eea25f811291ddb596.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pycuda-helpers.hpp>
//Textures for conserved variables
texture< fp_tex_cudaP, hipTextureType3D, hipReadModeElementType> tex_1;
texture< fp_tex_cudaP, hipTextureType3D, hipReadModeElementType> tex_2;
texture< fp_tex_cudaP, hipTextureType3D, hipReadModeElementType> tex_3;
texture< fp_tex_cudaP, hipTextureType3D, hipReadModeElementType> tex_4;
texture< fp_tex_cudaP, hipTextureType3D, hipReadModeElementType> tex_5;
//Surfaces for Fluxes
surface< void, cudaSurfaceType3D> surf_flx_1;
surface< void, cudaSurfaceType3D> surf_flx_2;
surface< void, cudaSurfaceType3D> surf_flx_3;
surface< void, cudaSurfaceType3D> surf_flx_4;
surface< void, cudaSurfaceType3D> surf_flx_5;
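// tex_1..tex_5 hold the conserved fields (rho, rho*vx, rho*vy, rho*vz, E) read by the kernels
// below; the matching surf_flx_1..surf_flx_5 surfaces receive the computed interface fluxes.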
// __device__ cudaP getPresure( cudaP gamma, cudaP rho, cudaP vel, cudaP E ){
// return ( E - rho*vel*vel/2 ) * (gamma-1);
// }
// __device__ float getSoundVel( cudaP gamma, cudaP rho, cudaP p ){
// return float( sqrt( gamma * p / rho ) );
// }
// __global__ void setFlux( int coord, cudaP gamma,
// cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5,
// float* soundVel2 ){
// int t_j = blockIdx.x*blockDim.x + threadIdx.x;
// int t_i = blockIdx.y*blockDim.y + threadIdx.y;
// int t_k = blockIdx.z*blockDim.z + threadIdx.z;
// int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
//
// cudaP rho, vx, vy, vz, p, E, v2;
// rho = cnsv_1[ tid ];
// vx = cnsv_2[ tid ] / rho;
// vy = cnsv_3[ tid ] / rho;
// vz = cnsv_4[ tid ] / rho;
// E = cnsv_5[ tid ];
// v2 = vx*vx + vy*vy + vz*vz;
// p = ( E - rho*v2/2 ) * (gamma-1);
//
// soundVel2[ tid ] = float( p * gamma / rho );
//
// // //Get the fluxes
// // cudaP f1, f2, f3, f4, f5;
// // if ( coord == 1 ){
// // f1 = rho * vx;
// // f2 = rho * vx * vx + p;
// // f3 = rho * vy * vx;
// // f4 = rho * vz * vx;
// // f5 = vx * ( E + p );
// // }
// // else if ( coord == 2){
// // f1 = rho * vy;
// // f2 = rho * vx * vy;
// // f3 = rho * vy * vy + p;
// // f4 = rho * vz * vy;
// // f5 = vy * ( E + p );
// // }
// // else if ( coord == 3){
// // f1 = rho * vz;
// // f2 = rho * vx * vz;
// // f3 = rho * vy * vz;
// // f4 = rho * vz * vz + p;
// // f5 = vz * ( E + p );
// // }
// //
// // //Write fluxes to surfaces
// // surf3Dwrite( f1, surf_flx_1, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
// // surf3Dwrite( f2, surf_flx_2, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
// // surf3Dwrite( f3, surf_flx_3, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
// // surf3Dwrite( f4, surf_flx_4, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
// // surf3Dwrite( f5, surf_flx_5, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
// }
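// HLL approximate Riemann solver for one coordinate direction: reads the left/center conserved
// states from the textures (with a reflecting condition on the low boundary), estimates the
// outermost wave speeds s_l/s_c, and writes each interface flux to the flux surfaces using
// F = F_l if s_l > 0, F = F_c if s_c < 0, and otherwise
// ( s_c*F_l - s_l*F_c + s_l*s_c*(U_c - U_l) ) / ( s_c - s_l ).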
__global__ void setInterFlux_hll( const int coord, const cudaP gamma, const cudaP dx, const cudaP dy, const cudaP dz,
cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5,
float* times ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
cudaP v2;
cudaP rho_l, vx_l, vy_l, vz_l, E_l, p_l;
cudaP rho_c, vx_c, vy_c, vz_c, E_c, p_c;
// float time;
//Read adjacent conserved variables
if ( coord == 1 ){
rho_l = fp_tex3D( tex_1, t_j-1, t_i, t_k);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j-1, t_i, t_k) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j-1, t_i, t_k) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j-1, t_i, t_k) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j-1, t_i, t_k);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_j == 0 ) vx_l = -vx_c;
}
else if ( coord == 2 ){
rho_l = fp_tex3D( tex_1, t_j, t_i-1, t_k);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j, t_i-1, t_k) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j, t_i-1, t_k) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j, t_i-1, t_k) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j, t_i-1, t_k);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_i == 0 ) vy_l = -vy_c;
}
else if ( coord == 3 ){
rho_l = fp_tex3D( tex_1, t_j, t_i, t_k-1);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j, t_i, t_k-1) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j, t_i, t_k-1) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j, t_i, t_k-1) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j, t_i, t_k-1);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_k == 0 ) vz_l = -vz_c;
}
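  // Recover pressure from the ideal-gas equation of state, p = (gamma - 1) * ( E - rho*|v|^2/2 ),
  // for the left-neighbor and centered cells.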
v2 = vx_l*vx_l + vy_l*vy_l + vz_l*vz_l;
p_l = ( E_l - rho_l*v2/2 ) * (gamma-1);
v2 = vx_c*vx_c + vy_c*vy_c + vz_c*vz_c;
p_c = ( E_c - rho_c*v2/2 ) * (gamma-1);
cudaP cs_l, cs_c, s_l, s_c;
cs_l = sqrt( p_l * gamma / rho_l );
cs_c = sqrt( p_c * gamma / rho_c );
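  // Simple min/max (Davis-type) estimates of the slowest and fastest signal speeds of the
  // left/center pair; for coord == 1 the per-cell CFL limit min_i dx_i / ( |v_i| + cs ) is also
  // stored in times[], presumably reduced to a global dt on the host.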
if ( coord == 1 ){
s_l = min( vx_l - cs_l, vx_c - cs_c );
s_c = max( vx_l + cs_l, vx_c + cs_c );
//Use v2 to save time minimum
v2 = dx / ( abs( vx_c ) + cs_c );
v2 = min( v2, dy / ( abs( vy_c ) + cs_c ) );
v2 = min( v2, dz / ( abs( vz_c ) + cs_c ) );
times[ tid ] = v2;
}
else if ( coord == 2 ){
s_l = min( vy_l - cs_l, vy_c - cs_c );
s_c = max( vy_l + cs_l, vy_c + cs_c );
}
else if ( coord == 3 ){
s_l = min( vz_l - cs_l, vz_c - cs_c );
s_c = max( vz_l + cs_l, vz_c + cs_c );
}
// Adjacent fluxes from left and center cell
cudaP F_l, F_c, iFlx;
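  // HLL intermediate flux, applied below to each conserved variable U with physical flux F:
  //   iFlx = F_l if s_l > 0, F_c if s_c < 0,
  //   otherwise ( s_c*F_l - s_l*F_c + s_l*s_c*( U_c - U_l ) ) / ( s_c - s_l )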
//iFlx rho
if ( coord == 1 ){
F_l = rho_l * vx_l;
F_c = rho_c * vx_c;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l;
F_c = rho_c * vy_c;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l;
F_c = rho_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c - rho_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_1, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
//iFlx rho * vx
if ( coord == 1 ){
F_l = rho_l * vx_l * vx_l + p_l;
F_c = rho_c * vx_c * vx_c + p_c;
}
else if ( coord == 2 ){
F_l = rho_l * vx_l * vy_l;
F_c = rho_c * vx_c * vy_c;
}
else if ( coord == 3 ){
F_l = rho_l * vx_l * vz_l;
F_c = rho_c * vx_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vx_c - rho_l*vx_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_2, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
//iFlx rho * vy
if ( coord == 1 ){
F_l = rho_l * vy_l * vx_l ;
F_c = rho_c * vy_c * vx_c ;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l * vy_l + p_l;
F_c = rho_c * vy_c * vy_c + p_c;
}
else if ( coord == 3 ){
F_l = rho_l * vy_l * vz_l;
F_c = rho_c * vy_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vy_c - rho_l*vy_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_3, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
//iFlx rho * vz
if ( coord == 1 ){
F_l = rho_l * vz_l * vx_l ;
F_c = rho_c * vz_c * vx_c ;
}
else if ( coord == 2 ){
F_l = rho_l * vz_l * vy_l ;
F_c = rho_c * vz_c * vy_c ;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l * vz_l + p_l ;
F_c = rho_c * vz_c * vz_c + p_c ;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vz_c - rho_l*vz_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_4, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
//iFlx E
if ( coord == 1 ){
F_l = vx_l * ( E_l + p_l ) ;
F_c = vx_c * ( E_c + p_c ) ;
}
else if ( coord == 2 ){
F_l = vy_l * ( E_l + p_l ) ;
F_c = vy_c * ( E_c + p_c ) ;
}
else if ( coord == 3 ){
F_l = vz_l * ( E_l + p_l ) ;
F_c = vz_c * ( E_c + p_c ) ;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( E_c - E_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_5, t_j*sizeof(cudaP), t_i, t_k, hipBoundaryModeClamp);
}
__global__ void getInterFlux_hll( const int coord, const cudaP dt, const cudaP gamma, const cudaP dx, const cudaP dy, const cudaP dz,
cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5 ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
//Read inter-cell fluxes from textures
cudaP iFlx1_l, iFlx2_l, iFlx3_l, iFlx4_l, iFlx5_l;
cudaP iFlx1_r, iFlx2_r, iFlx3_r, iFlx4_r, iFlx5_r;
cudaP delta;
if ( coord == 1 ){
delta = dt / dx;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j+1, t_i, t_k);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j+1, t_i, t_k);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j+1, t_i, t_k);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j+1, t_i, t_k);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j+1, t_i, t_k);
}
else if ( coord == 2 ){
delta = dt / dy;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j, t_i+1, t_k);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j, t_i+1, t_k);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j, t_i+1, t_k);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j, t_i+1, t_k);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j, t_i+1, t_k);
}
else if ( coord == 3 ){
delta = dt / dz;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j, t_i, t_k+1);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j, t_i, t_k+1);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j, t_i, t_k+1);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j, t_i, t_k+1);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j, t_i, t_k+1);
}
//Advance the consv values
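  // First-order finite-volume update: U_new = U - delta * ( F_right - F_left ),
  // with delta = dt/dx, dt/dy or dt/dz depending on coord.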
cnsv_1[ tid ] = cnsv_1[ tid ] - delta*( iFlx1_r - iFlx1_l );
cnsv_2[ tid ] = cnsv_2[ tid ] - delta*( iFlx2_r - iFlx2_l );
cnsv_3[ tid ] = cnsv_3[ tid ] - delta*( iFlx3_r - iFlx3_l );
cnsv_4[ tid ] = cnsv_4[ tid ] - delta*( iFlx4_r - iFlx4_l );
cnsv_5[ tid ] = cnsv_5[ tid ] - delta*( iFlx5_r - iFlx5_l );
}
| 1fa6e9403a6e2a65e55507eea25f811291ddb596.cu | #include <pycuda-helpers.hpp>
//Textures for conserv
texture< fp_tex_cudaP, cudaTextureType3D, cudaReadModeElementType> tex_1;
texture< fp_tex_cudaP, cudaTextureType3D, cudaReadModeElementType> tex_2;
texture< fp_tex_cudaP, cudaTextureType3D, cudaReadModeElementType> tex_3;
texture< fp_tex_cudaP, cudaTextureType3D, cudaReadModeElementType> tex_4;
texture< fp_tex_cudaP, cudaTextureType3D, cudaReadModeElementType> tex_5;
//Surfaces for Fluxes
surface< void, cudaSurfaceType3D> surf_flx_1;
surface< void, cudaSurfaceType3D> surf_flx_2;
surface< void, cudaSurfaceType3D> surf_flx_3;
surface< void, cudaSurfaceType3D> surf_flx_4;
surface< void, cudaSurfaceType3D> surf_flx_5;
// __device__ cudaP getPresure( cudaP gamma, cudaP rho, cudaP vel, cudaP E ){
// return ( E - rho*vel*vel/2 ) * (gamma-1);
// }
// __device__ float getSoundVel( cudaP gamma, cudaP rho, cudaP p ){
// return float( sqrt( gamma * p / rho ) );
// }
// __global__ void setFlux( int coord, cudaP gamma,
// cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5,
// float* soundVel2 ){
// int t_j = blockIdx.x*blockDim.x + threadIdx.x;
// int t_i = blockIdx.y*blockDim.y + threadIdx.y;
// int t_k = blockIdx.z*blockDim.z + threadIdx.z;
// int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
//
// cudaP rho, vx, vy, vz, p, E, v2;
// rho = cnsv_1[ tid ];
// vx = cnsv_2[ tid ] / rho;
// vy = cnsv_3[ tid ] / rho;
// vz = cnsv_4[ tid ] / rho;
// E = cnsv_5[ tid ];
// v2 = vx*vx + vy*vy + vz*vz;
// p = ( E - rho*v2/2 ) * (gamma-1);
//
// soundVel2[ tid ] = float( p * gamma / rho );
//
// // //Get the fluxes
// // cudaP f1, f2, f3, f4, f5;
// // if ( coord == 1 ){
// // f1 = rho * vx;
// // f2 = rho * vx * vx + p;
// // f3 = rho * vy * vx;
// // f4 = rho * vz * vz;
// // f5 = vx * ( E + p );
// // }
// // else if ( coord == 2){
// // f1 = rho * vy;
// // f2 = rho * vx * vy;
// // f3 = rho * vy * vy + p;
// // f4 = rho * vz * vy;
// // f5 = vy * ( E + p );
// // }
// // else if ( coord == 3){
// // f1 = rho * vz;
// // f2 = rho * vx * vz;
// // f3 = rho * vy * vz;
// // f4 = rho * vz * vz + p;
// // f5 = vz * ( E + p );
// // }
// //
// // //Write fluxes to surfaces
// // surf3Dwrite( f1, surf_flx_1, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
// // surf3Dwrite( f2, surf_flx_2, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
// // surf3Dwrite( f3, surf_flx_3, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
// // surf3Dwrite( f4, surf_flx_4, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
// // surf3Dwrite( f5, surf_flx_5, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
// }
__global__ void setInterFlux_hll( const int coord, const cudaP gamma, const cudaP dx, const cudaP dy, const cudaP dz,
cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5,
float* times ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
cudaP v2;
cudaP rho_l, vx_l, vy_l, vz_l, E_l, p_l;
cudaP rho_c, vx_c, vy_c, vz_c, E_c, p_c;
// float time;
//Read adjacent conserv
if ( coord == 1 ){
rho_l = fp_tex3D( tex_1, t_j-1, t_i, t_k);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j-1, t_i, t_k) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j-1, t_i, t_k) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j-1, t_i, t_k) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j-1, t_i, t_k);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_j == 0 ) vx_l = -vx_c;
}
else if ( coord == 2 ){
rho_l = fp_tex3D( tex_1, t_j, t_i-1, t_k);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j, t_i-1, t_k) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j, t_i-1, t_k) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j, t_i-1, t_k) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j, t_i-1, t_k);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_i == 0 ) vy_l = -vy_c;
}
else if ( coord == 3 ){
rho_l = fp_tex3D( tex_1, t_j, t_i, t_k-1);
rho_c = fp_tex3D( tex_1, t_j, t_i, t_k);
vx_l = fp_tex3D( tex_2, t_j, t_i, t_k-1) / rho_l;
vx_c = fp_tex3D( tex_2, t_j, t_i, t_k) / rho_c;
vy_l = fp_tex3D( tex_3, t_j, t_i, t_k-1) / rho_l;
vy_c = fp_tex3D( tex_3, t_j, t_i, t_k) / rho_c;
vz_l = fp_tex3D( tex_4, t_j, t_i, t_k-1) / rho_l;
vz_c = fp_tex3D( tex_4, t_j, t_i, t_k) / rho_c;
E_l = fp_tex3D( tex_5, t_j, t_i, t_k-1);
E_c = fp_tex3D( tex_5, t_j, t_i, t_k);
//Boundary bounce condition
if ( t_k == 0 ) vz_l = -vz_c;
}
v2 = vx_l*vx_l + vy_l*vy_l + vz_l*vz_l;
p_l = ( E_l - rho_l*v2/2 ) * (gamma-1);
v2 = vx_c*vx_c + vy_c*vy_c + vz_c*vz_c;
p_c = ( E_c - rho_c*v2/2 ) * (gamma-1);
cudaP cs_l, cs_c, s_l, s_c;
cs_l = sqrt( p_l * gamma / rho_l );
cs_c = sqrt( p_c * gamma / rho_c );
if ( coord == 1 ){
s_l = min( vx_l - cs_l, vx_c - cs_c );
s_c = max( vx_l + cs_l, vx_c + cs_c );
//Use v2 to save time minimum
v2 = dx / ( abs( vx_c ) + cs_c );
v2 = min( v2, dy / ( abs( vy_c ) + cs_c ) );
v2 = min( v2, dz / ( abs( vz_c ) + cs_c ) );
times[ tid ] = v2;
}
else if ( coord == 2 ){
s_l = min( vy_l - cs_l, vy_c - cs_c );
s_c = max( vy_l + cs_l, vy_c + cs_c );
}
else if ( coord == 3 ){
s_l = min( vz_l - cs_l, vz_c - cs_c );
s_c = max( vz_l + cs_l, vz_c + cs_c );
}
// Adjacent fluxes from left and center cell
cudaP F_l, F_c, iFlx;
//iFlx rho
if ( coord == 1 ){
F_l = rho_l * vx_l;
F_c = rho_c * vx_c;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l;
F_c = rho_c * vy_c;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l;
F_c = rho_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c - rho_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_1, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
//iFlx rho * vx
if ( coord == 1 ){
F_l = rho_l * vx_l * vx_l + p_l;
F_c = rho_c * vx_c * vx_c + p_c;
}
else if ( coord == 2 ){
F_l = rho_l * vx_l * vy_l;
F_c = rho_c * vx_c * vy_c;
}
else if ( coord == 3 ){
F_l = rho_l * vx_l * vz_l;
F_c = rho_c * vx_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vx_c - rho_l*vx_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_2, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
//iFlx rho * vy
if ( coord == 1 ){
F_l = rho_l * vy_l * vx_l ;
F_c = rho_c * vy_c * vx_c ;
}
else if ( coord == 2 ){
F_l = rho_l * vy_l * vy_l + p_l;
F_c = rho_c * vy_c * vy_c + p_c;
}
else if ( coord == 3 ){
F_l = rho_l * vy_l * vz_l;
F_c = rho_c * vy_c * vz_c;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vy_c - rho_l*vy_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_3, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
//iFlx rho * vz
if ( coord == 1 ){
F_l = rho_l * vz_l * vx_l ;
F_c = rho_c * vz_c * vx_c ;
}
else if ( coord == 2 ){
F_l = rho_l * vz_l * vy_l ;
F_c = rho_c * vz_c * vy_c ;
}
else if ( coord == 3 ){
F_l = rho_l * vz_l * vz_l + p_l ;
F_c = rho_c * vz_c * vz_c + p_c ;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( rho_c*vz_c - rho_l*vz_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_4, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
//iFlx E
if ( coord == 1 ){
F_l = vx_l * ( E_l + p_l ) ;
F_c = vx_c * ( E_c + p_c ) ;
}
else if ( coord == 2 ){
F_l = vy_l * ( E_l + p_l ) ;
F_c = vy_c * ( E_c + p_c ) ;
}
else if ( coord == 3 ){
F_l = vz_l * ( E_l + p_l ) ;
F_c = vz_c * ( E_c + p_c ) ;
}
if ( s_l > 0 ) iFlx = F_l;
else if ( s_c < 0 ) iFlx = F_c;
else iFlx = ( s_c*F_l - s_l*F_c + s_l*s_c*( E_c - E_l ) ) / ( s_c - s_l );
surf3Dwrite( iFlx, surf_flx_5, t_j*sizeof(cudaP), t_i, t_k, cudaBoundaryModeClamp);
}
__global__ void getInterFlux_hll( const int coord, const cudaP dt, const cudaP gamma, const cudaP dx, const cudaP dy, const cudaP dz,
cudaP* cnsv_1, cudaP* cnsv_2, cudaP* cnsv_3, cudaP* cnsv_4, cudaP* cnsv_5 ){
int t_j = blockIdx.x*blockDim.x + threadIdx.x;
int t_i = blockIdx.y*blockDim.y + threadIdx.y;
int t_k = blockIdx.z*blockDim.z + threadIdx.z;
int tid = t_j + t_i*blockDim.x*gridDim.x + t_k*blockDim.x*gridDim.x*blockDim.y*gridDim.y;
//Read inter-cell fluxes from textures
cudaP iFlx1_l, iFlx2_l, iFlx3_l, iFlx4_l, iFlx5_l;
cudaP iFlx1_r, iFlx2_r, iFlx3_r, iFlx4_r, iFlx5_r;
cudaP delta;
if ( coord == 1 ){
delta = dt / dx;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j+1, t_i, t_k);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j+1, t_i, t_k);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j+1, t_i, t_k);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j+1, t_i, t_k);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j+1, t_i, t_k);
}
else if ( coord == 2 ){
delta = dt / dy;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j, t_i+1, t_k);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j, t_i+1, t_k);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j, t_i+1, t_k);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j, t_i+1, t_k);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j, t_i+1, t_k);
}
else if ( coord == 3 ){
delta = dt / dz;
iFlx1_l = fp_tex3D( tex_1, t_j, t_i, t_k);
iFlx1_r = fp_tex3D( tex_1, t_j, t_i, t_k+1);
iFlx2_l = fp_tex3D( tex_2, t_j, t_i, t_k);
iFlx2_r = fp_tex3D( tex_2, t_j, t_i, t_k+1);
iFlx3_l = fp_tex3D( tex_3, t_j, t_i, t_k);
iFlx3_r = fp_tex3D( tex_3, t_j, t_i, t_k+1);
iFlx4_l = fp_tex3D( tex_4, t_j, t_i, t_k);
iFlx4_r = fp_tex3D( tex_4, t_j, t_i, t_k+1);
iFlx5_l = fp_tex3D( tex_5, t_j, t_i, t_k);
iFlx5_r = fp_tex3D( tex_5, t_j, t_i, t_k+1);
}
//Advance the consv values
cnsv_1[ tid ] = cnsv_1[ tid ] - delta*( iFlx1_r - iFlx1_l );
cnsv_2[ tid ] = cnsv_2[ tid ] - delta*( iFlx2_r - iFlx2_l );
cnsv_3[ tid ] = cnsv_3[ tid ] - delta*( iFlx3_r - iFlx3_l );
cnsv_4[ tid ] = cnsv_4[ tid ] - delta*( iFlx4_r - iFlx4_l );
cnsv_5[ tid ] = cnsv_5[ tid ] - delta*( iFlx5_r - iFlx5_l );
}
|
f316973c1dabf507b010128f203535719f477691.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
  // Compute the global element index
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
} | f316973c1dabf507b010128f203535719f477691.cu | #include "includes.h"
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
  // Compute the global element index
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
} |
2f805e5b467ac6fd40e4c07e2fd92a709b30ba2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
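 *
 * Example: with num_partitions = 8 the mask is 7, so a hash value of 37 maps to
 * 37 & 7 == 5, which matches 37 % 8.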
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
 * @param[in] the_hasher The row hasher used to compute each row's hash value
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
 * @param[in] the_partitioner The functor that maps a row's hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
 * @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the
// corresponding partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
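  // Carve the dynamic shared memory into the three regions sized in copy_block_partitions_impl:
  // staged output elements, per-block partition offsets, and global partition offsets.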
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global = partition_offset_shared + num_partitions + 1;
typedef hipcub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
  // use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread
// block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread
// block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of size_type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
hipLaunchKernelGGL(( copy_block_partitions), dim3(grid_size), dim3(OPTIMIZED_BLOCK_SIZE), smem, stream.value(),
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
rmm::device_uvector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
auto sequence = thrust::make_counting_iterator(0);
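  // Routing the identity sequence 0..num_rows-1 through copy_block_partitions yields, for each
  // output position, the index of its source row, i.e. a gather map for the partitioned output.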
rmm::device_uvector<size_type> gather_map(num_rows, stream);
copy_block_partitions_impl(sequence,
gather_map.begin(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(input.type(), input.size(), std::move(output));
}
template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
    // Use compute_gather_map (backed by copy_block_partitions) to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
// Use gather instead for non-fixed width types
return type_dispatcher(input.type(),
detail::column_gatherer{},
input,
gather_map.begin(),
gather_map.end(),
false,
stream,
mr);
}
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
  // NOTE grid_size is non-const to work around a lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_uvector<size_type>(num_rows, stream);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1
// partition(num_partitions -1) size, ...} }
auto block_partition_sizes = rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
auto scanned_block_partition_sizes =
rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
// Holds the total number of rows in each partition
auto global_partition_sizes =
cudf::detail::make_zeroed_device_uvector_async<size_type>(num_partitions, stream);
auto row_partition_offset =
cudf::detail::make_zeroed_device_uvector_async<size_type>(num_rows, stream);
auto const device_input = table_device_view::create(table_to_hash, stream);
auto const hasher = row_hasher<hash_function, hash_has_nulls>(*device_input, seed);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(), hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
} else {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(), hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
}
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each blocks portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data());
// Compute exclusive scan of size of each partition to determine offset
// location of each partition in final output.
// TODO This can be done independently on a separate stream
thrust::exclusive_scan(rmm::exec_policy(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
global_partition_sizes.begin());
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
auto const partition_offsets =
cudf::detail::make_std_vector_async(global_partition_sizes, stream);
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream,
mr);
});
if (has_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
}
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::make_pair(std::make_unique<table>(std::move(output_cols)),
std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data()};
hipLaunchKernelGGL(( compute_row_output_locations), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(),
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(
input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, stream, mr);
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::make_pair(std::move(output), std::move(partition_offsets));
}
}
struct dispatch_map_type {
/**
* @brief Partitions the table `t` according to the `partition_map`.
*
* Algorithm:
* - Compute the histogram of the size each partition
* - Compute the exclusive scan of the histogram to get the offset for each
* partition in the final partitioned output
* - Use a transform iterator to materialize the scatter map of the rows from
* `t` into the final output.
*
* @note JH: It would likely be more efficient to avoid the atomic increments
* in the transform iterator. It would probably be faster to compute a
* per-thread block histogram and compute an exclusive scan of all of the
* per-block histograms (like in hash partition). But I'm purposefully trying
* to reduce memory pressure by avoiding intermediate materializations. Plus,
* atomics resolve in L2 and should be pretty fast since all the offsets will
* fit in L2.
*
*/
template <typename MapType>
std::enable_if_t<is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Build a histogram of the number of rows in each partition
rmm::device_uvector<size_type> histogram(num_partitions + 1, stream);
std::size_t temp_storage_bytes{};
std::size_t const num_levels = num_partitions + 1;
size_type const lower_level = 0;
size_type const upper_level = num_partitions;
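    // Standard CUB/hipCUB two-phase pattern: the first call with a null temp-storage pointer
    // only computes temp_storage_bytes; the second call performs the histogram.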
    hipcub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
rmm::device_buffer temp_storage(temp_storage_bytes, stream);
    hipcub::DeviceHistogram::HistogramEven(temp_storage.data(),
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
// `histogram` was created with an extra entry at the end such that an
// exclusive scan will put the total number of rows at the end
thrust::exclusive_scan(
rmm::exec_policy(stream), histogram.begin(), histogram.end(), histogram.begin());
// Copy offsets to host before the transform below modifies the histogram
auto const partition_offsets = cudf::detail::make_std_vector_sync(histogram, stream);
// Unfortunately need to materialize the scatter map because
// `detail::scatter` requires multiple passes through the iterator
rmm::device_uvector<MapType> scatter_map(partition_map.size(), stream);
// For each `partition_map[i]`, atomically increment the corresponding
// partition offset to determine `i`s location in the output
thrust::transform(rmm::exec_policy(stream),
partition_map.begin<MapType>(),
partition_map.end<MapType>(),
scatter_map.begin(),
[offsets = histogram.data()] __device__(auto partition_number) {
return atomicAdd(&offsets[partition_number], 1);
});
// Scatter the rows into their partitions
auto scattered =
cudf::detail::scatter(t, scatter_map.begin(), scatter_map.end(), t, false, stream, mr);
return std::make_pair(std::move(scattered), std::move(partition_offsets));
}
template <typename MapType>
std::enable_if_t<not is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
CUDF_FAIL("Unexpected, non-integral partition map.");
}
};
} // namespace
namespace detail {
namespace local {
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::make_pair(empty_like(input), std::vector<size_type>{});
}
if (has_nulls(table_to_hash)) {
return hash_partition_table<hash_function, true>(
input, table_to_hash, num_partitions, seed, stream, mr);
} else {
return hash_partition_table<hash_function, false>(
input, table_to_hash, num_partitions, seed, stream, mr);
}
}
} // namespace local
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(t.num_rows() == partition_map.size(),
"Size mismatch between table and partition map.");
CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
if (num_partitions == 0 or t.num_rows() == 0) {
return std::make_pair(empty_like(t), std::vector<size_type>{});
}
return cudf::type_dispatcher(
partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
switch (hash_function) {
case (hash_id::HASH_IDENTITY):
for (const size_type& column_id : columns_to_hash) {
if (!is_numeric(input.column(column_id).type()))
CUDF_FAIL("IdentityHash does not support this data type");
}
return detail::local::hash_partition<IdentityHash>(
input, columns_to_hash, num_partitions, seed, stream, mr);
case (hash_id::HASH_MURMUR3):
return detail::local::hash_partition<MurmurHash3_32>(
input, columns_to_hash, num_partitions, seed, stream, mr);
default: CUDF_FAIL("Unsupported hash function in hash_partition");
}
}
// Partition based on an explicit partition map
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(t, partition_map, num_partitions, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 2f805e5b467ac6fd40e4c07e2fd92a709b30ba2d.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
 * @param[in] the_hasher The row hasher used to compute each row's hash value
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
 * @param[in] the_partitioner The functor that maps a row's hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
 * @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the
// corresponding partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global = partition_offset_shared + num_partitions + 1;
typedef cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
  // use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread
// block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread
// block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of the output's data type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
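// Illustrative sizing only (the constants are defined earlier in this file): with
// OPTIMIZED_BLOCK_SIZE = 512, OPTIMIZED_ROWS_PER_THREAD = 8, a 4-byte output type
// and num_partitions = 256, smem is 512 * 8 * 4 + 257 * 4 * 2 = 16384 + 2056 bytes,
// i.e. roughly 18 KiB of dynamic shared memory per block.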
copy_block_partitions<<<grid_size, OPTIMIZED_BLOCK_SIZE, smem, stream.value()>>>(
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
rmm::device_uvector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_uvector<size_type> gather_map(num_rows, stream);
copy_block_partitions_impl(sequence,
gather_map.begin(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
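// Because the copy above is fed a counting iterator (0, 1, 2, ...), the value that
// lands at output position p is the input row index routed there, i.e. a gather map.
// Illustrative example: with row partitions [1, 0, 1, 0] and two partitions, one
// valid partition-ordered gather map is [1, 3, 0, 2].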
struct copy_block_partitions_dispatcher {
template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(input.type(), input.size(), std::move(output));
}
template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Use the block-partition copy (compute_gather_map) to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
// Use gather instead for non-fixed width types
return type_dispatcher(input.type(),
detail::column_gatherer{},
input,
gather_map.begin(),
gather_map.end(),
false,
stream,
mr);
}
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
// NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_uvector<size_type>(num_rows, stream);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1
// partition(num_partitions -1) size, ...} }
auto block_partition_sizes = rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
auto scanned_block_partition_sizes =
rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
// Holds the total number of rows in each partition
auto global_partition_sizes =
cudf::detail::make_zeroed_device_uvector_async<size_type>(num_partitions, stream);
auto row_partition_offset =
cudf::detail::make_zeroed_device_uvector_async<size_type>(num_rows, stream);
auto const device_input = table_device_view::create(table_to_hash, stream);
auto const hasher = row_hasher<hash_function, hash_has_nulls>(*device_input, seed);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
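// For example, with num_partitions == 8 the bitwise partitioner reduces
// hash % 8 to hash & 7, avoiding an integer modulo per row.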
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
} else {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
}
// Compute an exclusive scan of all blocks' partition sizes into
// scanned_block_partition_sizes to determine the starting point of each block's
// portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data());
// Compute exclusive scan of size of each partition to determine offset
// location of each partition in final output.
// TODO This can be done independently on a separate stream
thrust::exclusive_scan(rmm::exec_policy(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
global_partition_sizes.begin());
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
auto const partition_offsets =
cudf::detail::make_std_vector_async(global_partition_sizes, stream);
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream,
mr);
});
if (has_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
}
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::make_pair(std::make_unique<table>(std::move(output_cols)),
std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data()};
compute_row_output_locations<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(
input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, stream, mr);
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::make_pair(std::move(output), std::move(partition_offsets));
}
}
struct dispatch_map_type {
/**
* @brief Partitions the table `t` according to the `partition_map`.
*
* Algorithm:
* - Compute the histogram of the size each partition
* - Compute the exclusive scan of the histogram to get the offset for each
* partition in the final partitioned output
* - Use a transform iterator to materialize the scatter map of the rows from
* `t` into the final output.
*
* @note JH: It would likely be more efficient to avoid the atomic increments
* in the transform iterator. It would probably be faster to compute a
* per-thread block histogram and compute an exclusive scan of all of the
* per-block histograms (like in hash partition). But I'm purposefully trying
* to reduce memory pressure by avoiding intermediate materializations. Plus,
* atomics resolve in L2 and should be pretty fast since all the offsets will
* fit in L2.
*
*/
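// Illustrative walk-through with partition_map = [2, 0, 1, 0] and num_partitions = 3:
// the histogram of partition sizes is [2, 1, 1] (plus one spare slot), the exclusive
// scan turns it into offsets [0, 2, 3] with the total (4) in the spare slot, and the
// atomicAdd-based transform then hands out output positions, e.g. scatter_map =
// [3, 0, 2, 1], so row i of `t` is scattered to output row scatter_map[i].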
template <typename MapType>
std::enable_if_t<is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Build a histogram of the number of rows in each partition
rmm::device_uvector<size_type> histogram(num_partitions + 1, stream);
std::size_t temp_storage_bytes{};
std::size_t const num_levels = num_partitions + 1;
size_type const lower_level = 0;
size_type const upper_level = num_partitions;
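// CUB's usual two-phase pattern: the first HistogramEven call, with a null
// temp-storage pointer, only reports the required scratch size in
// temp_storage_bytes; the second call with the allocated buffer builds the histogram.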
cub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
rmm::device_buffer temp_storage(temp_storage_bytes, stream);
cub::DeviceHistogram::HistogramEven(temp_storage.data(),
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
// `histogram` was created with an extra entry at the end such that an
// exclusive scan will put the total number of rows at the end
thrust::exclusive_scan(
rmm::exec_policy(stream), histogram.begin(), histogram.end(), histogram.begin());
// Copy offsets to host before the transform below modifies the histogram
auto const partition_offsets = cudf::detail::make_std_vector_sync(histogram, stream);
// Unfortunately need to materialize the scatter map because
// `detail::scatter` requires multiple passes through the iterator
rmm::device_uvector<MapType> scatter_map(partition_map.size(), stream);
// For each `partition_map[i]`, atomically increment the corresponding
// partition offset to determine `i`s location in the output
thrust::transform(rmm::exec_policy(stream),
partition_map.begin<MapType>(),
partition_map.end<MapType>(),
scatter_map.begin(),
[offsets = histogram.data()] __device__(auto partition_number) {
return atomicAdd(&offsets[partition_number], 1);
});
// Scatter the rows into their partitions
auto scattered =
cudf::detail::scatter(t, scatter_map.begin(), scatter_map.end(), t, false, stream, mr);
return std::make_pair(std::move(scattered), std::move(partition_offsets));
}
template <typename MapType>
std::enable_if_t<not is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
CUDF_FAIL("Unexpected, non-integral partition map.");
}
};
} // namespace
namespace detail {
namespace local {
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::make_pair(empty_like(input), std::vector<size_type>{});
}
if (has_nulls(table_to_hash)) {
return hash_partition_table<hash_function, true>(
input, table_to_hash, num_partitions, seed, stream, mr);
} else {
return hash_partition_table<hash_function, false>(
input, table_to_hash, num_partitions, seed, stream, mr);
}
}
} // namespace local
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(t.num_rows() == partition_map.size(),
"Size mismatch between table and partition map.");
CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
if (num_partitions == 0 or t.num_rows() == 0) {
return std::make_pair(empty_like(t), std::vector<size_type>{});
}
return cudf::type_dispatcher(
partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
switch (hash_function) {
case (hash_id::HASH_IDENTITY):
for (const size_type& column_id : columns_to_hash) {
if (!is_numeric(input.column(column_id).type()))
CUDF_FAIL("IdentityHash does not support this data type");
}
return detail::local::hash_partition<IdentityHash>(
input, columns_to_hash, num_partitions, seed, stream, mr);
case (hash_id::HASH_MURMUR3):
return detail::local::hash_partition<MurmurHash3_32>(
input, columns_to_hash, num_partitions, seed, stream, mr);
default: CUDF_FAIL("Unsupported hash function in hash_partition");
}
}
// Partition based on an explicit partition map
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(t, partition_map, num_partitions, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
127a08508fd6154f8de3ce1fddf7a5adaef647a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "../../XTensor.h"
#include "MergeBlockLists.h"
#include "MergeBlockLists.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
copy a number of blocks (of different sizes) to target positions
>> sourceList - list of data arrays to copy from
>> sourceBlockSizes - the size of the block_i
>> sourceBlockNum - number of blocks to merge
>> targetList - list of data arrays to copy to
*/
__global__
void KernelCopyBlockLists(DTYPE * sourceList[], int * sourceBlockSizes, int sourceBlockNum, DTYPE * targetList[])
{
__shared__ int iBlockSizes[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE * iSourceList[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE * iTargetList[MAX_CUDA_THREAD_NUM_PER_BLOCK];
/* entry index in the block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block index */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (j >= sourceBlockNum)
return;
if (threadIdx.x == 0) {
iBlockSizes[threadIdx.y] = sourceBlockSizes[j];
iSourceList[threadIdx.y] = sourceList[j];
iTargetList[threadIdx.y] = targetList[j];
}
__syncthreads();
if (i < iBlockSizes[threadIdx.y])
iTargetList[threadIdx.y][i] = iSourceList[threadIdx.y][i];
}
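/* In the kernel above, the y dimension selects which block in the list is copied and
the x dimension selects the element inside that block; thread x == 0 of each y row
caches the block size and the source/target pointers in shared memory. */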
/*
merge data by blocks (cuda version)
>> sourceList - list of data arrays (heads of the blocks) to copy from
>> blockSizes - size of the blocks
>> blockNum - number of blocks
>> target - target data array
>> myMem - the memory pool
*/
void _CudaMergeBlockLists(const XList * sourceList, int * blockSizes, int blockNum, void * target, XMem * myMem)
{
CheckNTErrors((myMem != NULL), "No memory pool!");
CheckNTErrors((myMem->devID >= 0), "Wrong device to run!");
int newBlockListSize = sourceList->count * blockNum;
int minBlockSize = MAX_INT;
int maxBlockSize = -MAX_INT;
int realMaxBlockSize = 1;
DTYPE ** sourceArrays = new DTYPE*[newBlockListSize];
DTYPE ** targetArrays = new DTYPE*[newBlockListSize];
int * sizes = new int[newBlockListSize];
int * offsets = new int[sourceList->count];
memset(offsets, 0, sizeof(int) * sourceList->count);
int totalOffset = 0;
for (int k = 0; k < blockNum; k++) {
for (int i = 0; i < sourceList->count; i++) {
CheckNTErrors((blockSizes[i] % sizeof(DTYPE) == 0), "Unsupported block size!");
int j = k * sourceList->count + i;
sizes[j] = blockSizes[i] / sizeof(DTYPE);
sourceArrays[j] = (DTYPE*)sourceList->GetItem(i) + offsets[i];
targetArrays[j] = (DTYPE*)target + totalOffset;
offsets[i] += sizes[i];
totalOffset += sizes[i];
if (minBlockSize > blockSizes[i])
minBlockSize = blockSizes[i];
if (maxBlockSize < blockSizes[i])
maxBlockSize = blockSizes[i];
}
}
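/* The loops above lay the blocks out interleaved in the target buffer. Illustrative
example: with two source arrays and blockNum == 2, the target order is
[src0.block0, src1.block0, src0.block1, src1.block1]. */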
CheckNTErrors((minBlockSize % sizeof(DTYPE) == 0), "Unsupported block size!");
CheckNTErrors((maxBlockSize % sizeof(DTYPE) == 0), "Unsupported block size!");
realMaxBlockSize = maxBlockSize / sizeof(DTYPE);
int devIDBackup;
ProtectCudaDev(myMem->devID, devIDBackup);
int cudaGridSizes[3];
int cudaBlockSizes[3];
GDevs.GetCudaThread2D(myMem->devID, realMaxBlockSize, newBlockListSize, MAX_INT,
cudaGridSizes, cudaBlockSizes);
myMem->SetPinBuf();
int * sizesGPU = (int*)myMem->AllocBuf(myMem->devID, sizeof(int) * newBlockListSize, 256);
DTYPE ** sourceArraysGPU = (DTYPE**)myMem->AllocBuf(myMem->devID, sizeof(DTYPE*) * newBlockListSize, 256);
DTYPE ** targetArraysGPU = (DTYPE**)myMem->AllocBuf(myMem->devID, sizeof(DTYPE*) * newBlockListSize, 256);
XMemCopy(sizesGPU, myMem->devID, sizes, -1, sizeof(int) * newBlockListSize);
XMemCopy(sourceArraysGPU, myMem->devID, sourceArrays, -1, sizeof(DTYPE*) * newBlockListSize);
XMemCopy(targetArraysGPU, myMem->devID, targetArrays, -1, sizeof(DTYPE*) * newBlockListSize);
KernelCopyBlockLists << <dim3(cudaGridSizes[0], cudaGridSizes[1]), dim3(cudaBlockSizes[0], cudaBlockSizes[1]) >> >
(sourceArraysGPU, sizesGPU, newBlockListSize, targetArraysGPU);
myMem->BackToPinBuf();
delete[] sourceArrays;
delete[] targetArrays;
delete[] sizes;
delete[] offsets;
BacktoCudaDev(myMem->devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor) |
127a08508fd6154f8de3ce1fddf7a5adaef647a6.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "../../XTensor.h"
#include "MergeBlockLists.h"
#include "MergeBlockLists.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
copy a number of blocks (of different sizes) to target positions
>> sourceList - list of data arrays to copy from
>> sourceBlockSizes - the size of the block_i
>> sourceBlockNum - number of blocks to merge
>> targetList - list of data arrays to copy to
*/
__global__
void KernelCopyBlockLists(DTYPE * sourceList[], int * sourceBlockSizes, int sourceBlockNum, DTYPE * targetList[])
{
__shared__ int iBlockSizes[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE * iSourceList[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE * iTargetList[MAX_CUDA_THREAD_NUM_PER_BLOCK];
/* entry index in the block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block index */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (j >= sourceBlockNum)
return;
if (threadIdx.x == 0) {
iBlockSizes[threadIdx.y] = sourceBlockSizes[j];
iSourceList[threadIdx.y] = sourceList[j];
iTargetList[threadIdx.y] = targetList[j];
}
__syncthreads();
if (i < iBlockSizes[threadIdx.y])
iTargetList[threadIdx.y][i] = iSourceList[threadIdx.y][i];
}
/*
merge data by blocks (cuda version)
>> sourceList - list of data arrays (heads of the blocks) to copy from
>> blockSizes - size of the blocks
>> blockNum - number of blocks
>> target - target data array
>> myMem - the memory pool
*/
void _CudaMergeBlockLists(const XList * sourceList, int * blockSizes, int blockNum, void * target, XMem * myMem)
{
CheckNTErrors((myMem != NULL), "No memory pool!");
CheckNTErrors((myMem->devID >= 0), "Wrong device to run!");
int newBlockListSize = sourceList->count * blockNum;
int minBlockSize = MAX_INT;
int maxBlockSize = -MAX_INT;
int realMaxBlockSize = 1;
DTYPE ** sourceArrays = new DTYPE*[newBlockListSize];
DTYPE ** targetArrays = new DTYPE*[newBlockListSize];
int * sizes = new int[newBlockListSize];
int * offsets = new int[sourceList->count];
memset(offsets, 0, sizeof(int) * sourceList->count);
int totalOffset = 0;
for (int k = 0; k < blockNum; k++) {
for (int i = 0; i < sourceList->count; i++) {
CheckNTErrors((blockSizes[i] % sizeof(DTYPE) == 0), "Unsupported block size!");
int j = k * sourceList->count + i;
sizes[j] = blockSizes[i] / sizeof(DTYPE);
sourceArrays[j] = (DTYPE*)sourceList->GetItem(i) + offsets[i];
targetArrays[j] = (DTYPE*)target + totalOffset;
offsets[i] += sizes[i];
totalOffset += sizes[i];
if (minBlockSize > blockSizes[i])
minBlockSize = blockSizes[i];
if (maxBlockSize < blockSizes[i])
maxBlockSize = blockSizes[i];
}
}
CheckNTErrors((minBlockSize % sizeof(DTYPE) == 0), "Unsupported block size!");
CheckNTErrors((maxBlockSize % sizeof(DTYPE) == 0), "Unsupported block size!");
realMaxBlockSize = maxBlockSize / sizeof(DTYPE);
int devIDBackup;
ProtectCudaDev(myMem->devID, devIDBackup);
int cudaGridSizes[3];
int cudaBlockSizes[3];
GDevs.GetCudaThread2D(myMem->devID, realMaxBlockSize, newBlockListSize, MAX_INT,
cudaGridSizes, cudaBlockSizes);
myMem->SetPinBuf();
int * sizesGPU = (int*)myMem->AllocBuf(myMem->devID, sizeof(int) * newBlockListSize, 256);
DTYPE ** sourceArraysGPU = (DTYPE**)myMem->AllocBuf(myMem->devID, sizeof(DTYPE*) * newBlockListSize, 256);
DTYPE ** targetArraysGPU = (DTYPE**)myMem->AllocBuf(myMem->devID, sizeof(DTYPE*) * newBlockListSize, 256);
XMemCopy(sizesGPU, myMem->devID, sizes, -1, sizeof(int) * newBlockListSize);
XMemCopy(sourceArraysGPU, myMem->devID, sourceArrays, -1, sizeof(DTYPE*) * newBlockListSize);
XMemCopy(targetArraysGPU, myMem->devID, targetArrays, -1, sizeof(DTYPE*) * newBlockListSize);
KernelCopyBlockLists << <dim3(cudaGridSizes[0], cudaGridSizes[1]), dim3(cudaBlockSizes[0], cudaBlockSizes[1]) >> >
(sourceArraysGPU, sizesGPU, newBlockListSize, targetArraysGPU);
myMem->BackToPinBuf();
delete[] sourceArrays;
delete[] targetArrays;
delete[] sizes;
delete[] offsets;
BacktoCudaDev(myMem->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
66e60aad00fafc9a1b28c47b6d5d481f2943c079.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* Maria Isabel Ortiz Naranjo
* Carnet: 18176
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
* Computes the element-wise product of A and B into C (the kernel keeps the name
* vectorAdd, but its body multiplies rather than adds). The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] * B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 240;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 240;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
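// With numElements = 240 and threadsPerBlock = 240 this rounds up to
// (240 + 239) / 240 = 1 block; the same formula still covers every element when
// numElements is not an exact multiple of threadsPerBlock.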
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float sum = 0;
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
sum += h_C[i];
if (fabs(h_A[i] * h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("resultado %f \n", sum);
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} |
66e60aad00fafc9a1b28c47b6d5d481f2943c079.cu | /**
*
* Maria Isabel Ortiz Naranjo
* Carnet: 18176
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
* Computes the element-wise product of A and B into C (the kernel keeps the name
* vectorAdd, but its body multiplies rather than adds). The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] * B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 240;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 240;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float sum = 0;
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
sum += h_C[i];
if (fabs(h_A[i] * h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("resultado %f \n", sum);
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} |
39f415f71ee9f6e8db1bcc327d822ca822b95ae6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "hpfft.h"
#include <math.h>
#include <cmath>
#include <stdio.h>
#include "../src/io.h"
#define SAMPLING_FREQ 44100
__global__ void cufftShiftPadZeros(float2* output, float* input, int N, int numzeros, int offset){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[(idx + offset) + N/2];
output[idx + N/2 + numzeros].x = input[idx + offset];
}
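// The cufftShiftPadZeros kernels swap the two halves of the frame (an fftshift) and
// leave numzeros untouched slots between them. Illustrative example for N == 4 and
// input [a b c d]: the output becomes [c d 0 ... 0 a b], assuming the output buffer
// was zero-initialised; the float2 variants write only the real (.x) component.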
__global__ void cufftShiftPadZeros(float* output, float* input, int N, int numzeros){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx] = input[idx + N/2];
output[idx + N/2 + numzeros]= input[idx];
}
__global__ void cufftShiftPadZeros(float2* output, float* input, int N, int numzeros){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[idx + N/2];
output[idx + N/2 + numzeros].x = input[idx];
}
__global__ void cufftShift(float2* output, float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[idx + N/2];
output[idx + N/2].x = input[idx];
}
__global__ void cufftShift(float* output, float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx] = input[idx + N/2];
output[idx + N/2] = input[idx];
}
__global__ void cufftShift(float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
float tmp = input[idx];
input[idx] = input[idx + N/2];
input[idx + N/2] = tmp;
}
__global__ void cudaWindow(float* input, float* win, int nSamps, int offset){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
input[idx + offset] = input[idx + offset] * win[idx];
}
__global__ void cudaWindow(float* input, float* output, float* win, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
output[idx] = input[idx] * win[idx];
}
__global__ void cudaWindow(float* input, float* win, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
input[idx] = input[idx] * win[idx];
}
__global__ void cudaWindow_HanRT(float* input, float* output, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
output[idx] = input[idx] * 0.5f * (1.f - cosf(2.f*M_PI*idx / nSamps));
}
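// cudaWindow_HanRT above computes the periodic Hann window on the fly,
// w[n] = 0.5f * (1 - cos(2*pi*n / nSamps)), instead of reading a precomputed
// window buffer like the cudaWindow variants.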
__global__ void cudaMagFreq(float2* output, float2* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N){
return;
}
output[idx].x = sqrtf(input[idx].x * input[idx].x + input[idx].y * input[idx].y);
output[idx].y = atanf(input[idx].y / input[idx].x);
}
__global__ void cudaMagFreq(float2* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N){
return;
}
float2 temp = input[idx];
input[idx].x = sqrtf(temp.x * temp.x + temp.y * temp.y);
input[idx].y = atanf(temp.y / temp.x);
}
__global__ void cudaOverlapAdd(float* backFrame, float* frontFrame, int N, int hopSize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N || idx < hopSize) {
return;
}
frontFrame[idx - hopSize] += backFrame[idx];
}
__global__ void cudaTimeScale(float2* input, int N, int timeScale) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > N) {
return;
}
// cache the (magnitude, phase) pair before overwriting .x, so the imaginary part
// is built from the original magnitude rather than the freshly written real part
float2 temp = input[idx];
input[idx].x = temp.x * cosf(timeScale * temp.y);
input[idx].y = temp.x * sinf(timeScale * temp.y);
}
__global__ void cudaDivVec(float* input, float N, int scale) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > N) {
return;
}
input[idx] /= scale;
}
__global__ void cudaDivVec(float2* input, float* output, float N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > (int)N) {
return;
}
output[idx] = input[idx].x / N;
}
__global__ void padZeros(float* input, float2* output, int N, int zeros) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N + zeros) {
return;
}
// inclusive lower bound so that input[0] lands at output[zeros / 2]
if (idx >= zeros / 2 && idx < zeros / 2 + N) {
output[idx].x = input[idx - (zeros / 2)];
}
else {
output[idx].x = 0;
}
}
//#define DEBUGCUFFT
//#define DEBUGMAG
//#define DEBUGpad
//#define DEBUGwindow
namespace CudaPhase{
/// used by twod
using FFT::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
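// Analysis stage of the phase vocoder as implemented below: window the input frame,
// fftshift it while zero-padding to length 2*N, run a forward FFT, then convert the
// complex bins in place to (magnitude, phase) pairs with cudaMagFreq.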
void pv_analysis(float2* output, float2* fft, float* input, float* intermediary, float* win, int N) {
cudaWindow << <1, N >> > (input, intermediary, win, N);
checkCUDAError_("Window analysis", __LINE__);
#ifdef DEBUGwindow
float *debug_arr;
hipMallocManaged((void**)&debug_arr, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging input after WINDOW (malloc)", __LINE__);
hipMemcpy(debug_arr,input, sizeof(float) * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging input after WINDOW (memcpy)", __LINE__);
printf("in\n");
printArraywNewLines(N, debug_arr);
hipMemcpy(debug_arr,intermediary, sizeof(float) * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging intermediary after WINDOW (mempy)", __LINE__);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
hipFree(debug_arr);
#endif
hipLaunchKernelGGL(( cufftShiftPadZeros), dim3(1), dim3(N/2), 0, 0, output, intermediary, N, N);
checkCUDAError_("pad zero analysis", __LINE__);
#ifdef DEBUGpad
float2 *debug_arr1;
hipMallocManaged((void**)&debug_arr1, sizeof(float2) *2 * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after cufftshift (malloc)", __LINE__);
hipMemcpy(debug_arr1,output, sizeof(float2) *2 * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after cufftshift (memcpy)", __LINE__);
printf("out\n");
printArraywNewLines(2*N, debug_arr1);
hipFree(debug_arr1);
#endif
FFT::HPFFT::computeGPUFFT(2*N, 2, output, fft);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGFFT
float2 *debug_arr2;
hipMallocManaged((void**)&debug_arr2, sizeof(float2) * N, hipMemAttachHost);
hipMemcpy(debug_arr2,output, sizeof(float2) * N,hipMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
hipFree(debug_arr2);
#endif
cudaMagFreq << <1,2* N >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis kernel.cu", __LINE__);
}
void pv_analysis_RT(float2* output, float2* fft, float* input, float* intermediary, float* win, int N, hipStream_t* stream) {
cudaWindow_HanRT<< <1, N , 0 , *stream>> > (input, intermediary, N);
checkCUDAError_("Window analysis", __LINE__);
#ifdef DEBUGwindow
float *debug_arr;
hipMallocManaged((void**)&debug_arr, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging input after WINDOW (malloc)", __LINE__);
hipMemcpy(debug_arr,input, sizeof(float) * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging input after WINDOW (memcpy)", __LINE__);
printf("in\n");
printArraywNewLines(N, debug_arr);
hipMemcpy(debug_arr,intermediary, sizeof(float) * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging intermediary after WINDOW (mempy)", __LINE__);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
hipFree(debug_arr);
#endif
hipLaunchKernelGGL(( cufftShiftPadZeros), dim3(1), dim3(N/2), 0, *stream, output, intermediary, N, N);
checkCUDAError_("pad zero analysis", __LINE__);
#ifdef DEBUGpad
float2 *debug_arr1;
hipMallocManaged((void**)&debug_arr1, sizeof(float2) *2 * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after cufftshift (malloc)", __LINE__);
hipMemcpy(debug_arr1,output, sizeof(float2) *2 * N,hipMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after cufftshift (memcpy)", __LINE__);
printf("out\n");
printArraywNewLines(2*N, debug_arr1);
hipFree(debug_arr1);
#endif
FFT::HPFFT::computeGPUFFT_RT(2*N, 2, output, fft, stream);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGFFT
float2 *debug_arr2;
hipMallocManaged((void**)&debug_arr2, sizeof(float2) * N, hipMemAttachHost);
hipMemcpy(debug_arr2,output, sizeof(float2) * N,hipMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
hipFree(debug_arr2);
#endif
cudaMagFreq << <1,2* N, 0,*stream >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis kernel.cu", __LINE__);
}
//#define DEBUGIFFT
void resynthesis(float* output, float* backFrame, float2* frontFrame, float2* intermediary, float* win, int N, int hopSize) {
cudaTimeScale << <1, 2* N >> > (frontFrame,2* N, 1);
FFT::HPFFT::computeGPUIFFT(2*N, 2, frontFrame, intermediary);
checkCUDAError_("ifft error");
#ifdef DEBUGIFFT
float2 *debug_arr2;
hipMallocManaged((void**)&debug_arr2, sizeof(float2) * 2 * N, hipMemAttachHost);
hipMemcpy(debug_arr2,intermediary, sizeof(float2) * 2 * N,hipMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
hipFree(debug_arr2);
#endif
cudaDivVec << <1, N >> > ( intermediary,output, N);
checkCUDAError_("divvec error");
hipLaunchKernelGGL(( cufftShift), dim3(1),dim3(N/2), 0, 0, output, N);
checkCUDAError_("shift error");
hipLaunchKernelGGL(( cudaWindow), dim3(1), dim3(N), 0, 0, output, win,N);
checkCUDAError_("window error");
hipLaunchKernelGGL(( cudaOverlapAdd), dim3(1),dim3(N), 0, 0, backFrame, output, N, hopSize);
checkCUDAError_("add error");
}
void test_overlap_add(float* input, float* output, float* intermediary, float* backFrame, float* win, int N, int hopSize){
cudaWindow<< <1,N >> > (input, intermediary, win, N);
checkCUDAError_("window error", __LINE__);
hipLaunchKernelGGL(( cufftShift), dim3(1), dim3(N/2), 0, 0, output, intermediary, N);
checkCUDAError_("shift error", __LINE__);
hipLaunchKernelGGL(( cufftShift), dim3(1),dim3(N/2), 0, 0, output, N);
checkCUDAError_("shift error", __LINE__);
hipLaunchKernelGGL(( cudaWindow), dim3(1), dim3(N), 0, 0, output, win, N);
hipLaunchKernelGGL(( cudaOverlapAdd), dim3(1),dim3(N), 0, 0, backFrame, output, N, hopSize);
}
void pv_analysis_CUFFT(float2* output, float2* fft, float* input, float* intermediary, float* win, int N) {
timer().startGpuTimer();
cudaWindow<< <1,N >> > (input, intermediary, win, N);
#ifdef DEBUGwindow
float *debug_arr;
hipMallocManaged((void**)&debug_arr, sizeof(float) * N, hipMemAttachHost);
hipMemcpy(debug_arr,input, sizeof(float) * N,hipMemcpyDeviceToHost);
printf("in\n");
printArraywNewLines(N, debug_arr);
hipMemcpy(debug_arr,intermediary, sizeof(float) * N,hipMemcpyDeviceToHost);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
hipFree(debug_arr);
#endif
checkCUDAError_("Window analysis", __LINE__);
hipLaunchKernelGGL(( cufftShiftPadZeros), dim3(1), dim3(N/2), 0, 0, output, intermediary, N, N);
#ifdef DEBUGpad
float2 *debug_arr1;
hipMallocManaged((void**)&debug_arr1, sizeof(float2) * N, hipMemAttachHost);
hipMemcpy(debug_arr1,output, sizeof(float2) * N,hipMemcpyDeviceToHost);
printf("out\n");
printArraywNewLines(N, debug_arr1);
hipFree(debug_arr1);
#endif
checkCUDAError_("pad zero analysis", __LINE__);
hipfftHandle plan;
hipfftPlan1d(&plan, 2 * N, HIPFFT_C2C, 1);
hipfftExecC2C(plan, (hipfftComplex *)output, (hipfftComplex *)output, HIPFFT_FORWARD);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGCUFFT
float2 *debug_arr2;
hipMallocManaged((void**)&debug_arr2, sizeof(float2) *2* N, hipMemAttachHost);
hipMemcpy(debug_arr2,output, sizeof(float2) *2* N,hipMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(2*N, debug_arr2);
hipFree(debug_arr2);
#endif
hipfftDestroy(plan);
cudaMagFreq << <1,2 * N >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis", __LINE__);
#ifdef DEBUGMAG
float2 *debug_arr3;
hipMallocManaged((void**)&debug_arr3, sizeof(float2) *2 * N, hipMemAttachHost);
hipMemcpy(debug_arr3,output, sizeof(float2) *2 * N,hipMemcpyDeviceToHost);
printf("postMagnitude\n");
printArraywNewLines(2*N, debug_arr3);
hipFree(debug_arr3);
#endif
timer().endGpuTimer();
}
//#define DEBUGTS
//#define DEBUGIFFT
//#define DEBUGSHIFTRE
void resynthesis_CUFFT(float* output, float* backFrame, float2* frontFrame, float* win,int N, int hopSize) {
timer().startGpuTimer();
cudaTimeScale << <1,2*N >> > (frontFrame,2* N, 1);
#ifdef DEBUGTS
float2 *debug_arr2;
hipMallocManaged((void**)&debug_arr2, sizeof(float2) * 2 * N, hipMemAttachHost);
hipMemcpy(debug_arr2,frontFrame, sizeof(float2) * 2 * N,hipMemcpyDeviceToHost);
printf("postTS\n");
printArraywNewLines(N, debug_arr2);
hipFree(debug_arr2);
#endif
hipfftHandle plan;
hipfftPlan1d(&plan, N, HIPFFT_C2R, 1);
checkCUDAError_("Cufft Plan IFFT Error", __LINE__);
hipfftExecC2R(plan, (hipfftComplex*)frontFrame, (hipfftReal *)output);
checkCUDAError_("ifft error");
hipfftDestroy(plan);
checkCUDAError_("cufftDestory error");
#ifdef DEBUGIFFT
float *debug_arr;
hipMallocManaged((void**)&debug_arr, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
hipMemcpy(debug_arr,output, sizeof(float) * N,hipMemcpyHostToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("CU IFFT\n");
printArraywNewLines(N, debug_arr);
hipFree(debug_arr);
#endif
cudaDivVec << <1, N >> > (output, N, N);
checkCUDAError_("divvec error");
#ifdef DEBUGSCALE
float *debug_arr1;
hipMallocManaged((void**)&debug_arr1, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
hipMemcpy(debug_arr1, output, sizeof(float) * N,hipMemcpyHostToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("SCALE RE\n");
printArraywNewLines(N, debug_arr1);
hipFree(debug_arr1);
#endif
hipLaunchKernelGGL(( cufftShift), dim3(1),dim3(N/2), 0, 0, output, N);
checkCUDAError_("shift error");
#ifdef DEBUGSHIFTRE
float *debug_arr3;
hipMallocManaged((void**)&debug_arr3, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
hipMemcpy(debug_arr3, output, sizeof(float) * N,hipMemcpyHostToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("SHIFT RE\n");
printArraywNewLines(N, debug_arr3);
hipFree(debug_arr3);
#endif
hipLaunchKernelGGL(( cudaWindow), dim3(1), dim3(N), 0, 0, output, win, N);
checkCUDAError_("window error");
#ifdef DEBUGSHIFTRE
float *debug_arr4;
hipMallocManaged((void**)&debug_arr4, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
hipMemcpy(debug_arr4, output, sizeof(float) * N,hipMemcpyHostToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("WINDOW resynth\n");
printArraywNewLines(N, debug_arr4);
hipFree(debug_arr4);
#endif
hipLaunchKernelGGL(( cudaOverlapAdd), dim3(1),dim3(N), 0, 0, backFrame, output, N, hopSize);
checkCUDAError_("add error");
#ifdef DEBUGOADD
float *debug_arr5;
hipMallocManaged((void**)&debug_arr5, sizeof(float) * N, hipMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
hipMemcpy(debug_arr5, output, sizeof(float) * N,hipMemcpyHostToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("WINDOW resynth\n");
printArraywNewLines(N, debug_arr5);
hipFree(debug_arr5);
#endif
timer().endGpuTimer();
}
} |
39f415f71ee9f6e8db1bcc327d822ca822b95ae6.cu | #include "kernel.h"
#include "hpfft.h"
#include <math.h>
#include <cmath>
#include <stdio.h>
#include "../src/io.h"
#define SAMPLING_FREQ 44100
__global__ void cufftShiftPadZeros(float2* output, float* input, int N, int numzeros, int offset){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[(idx + offset) + N/2];
output[idx + N/2 + numzeros].x = input[idx + offset];
}
__global__ void cufftShiftPadZeros(float* output, float* input, int N, int numzeros){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx] = input[idx + N/2];
output[idx + N/2 + numzeros]= input[idx];
}
__global__ void cufftShiftPadZeros(float2* output, float* input, int N, int numzeros){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[idx + N/2];
output[idx + N/2 + numzeros].x = input[idx];
}
__global__ void cufftShift(float2* output, float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx].x = input[idx + N/2];
output[idx + N/2].x = input[idx];
}
__global__ void cufftShift(float* output, float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
output[idx] = input[idx + N/2];
output[idx + N/2] = input[idx];
}
__global__ void cufftShift(float* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N / 2){
return;
}
float tmp = input[idx];
input[idx] = input[idx + N/2];
input[idx + N/2] = tmp;
}
__global__ void cudaWindow(float* input, float* win, int nSamps, int offset){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
input[idx + offset] = input[idx + offset] * win[idx];
}
__global__ void cudaWindow(float* input, float* output, float* win, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
output[idx] = input[idx] * win[idx];
}
__global__ void cudaWindow(float* input, float* win, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
input[idx] = input[idx] * win[idx];
}
__global__ void cudaWindow_HanRT(float* input, float* output, int nSamps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nSamps){
return;
}
output[idx] = input[idx] * 0.5f * (1.f - cosf(2.f*M_PI*idx / nSamps));
}
__global__ void cudaMagFreq(float2* output, float2* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N){
return;
}
output[idx].x = sqrtf(input[idx].x * input[idx].x + input[idx].y * input[idx].y);
output[idx].y = atanf(input[idx].y / input[idx].x);
}
__global__ void cudaMagFreq(float2* input, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= N){
return;
}
float2 temp = input[idx];
input[idx].x = sqrtf(temp.x * temp.x + temp.y * temp.y);
input[idx].y = atanf(temp.y / temp.x);
}
__global__ void cudaOverlapAdd(float* backFrame, float* frontFrame, int N, int hopSize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N || idx < hopSize) {
return;
}
frontFrame[idx - hopSize] += backFrame[idx];
}
__global__ void cudaTimeScale(float2* input, int N, int timeScale) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > N) {
return;
}
// cache the (magnitude, phase) pair before overwriting .x, so the imaginary part
// is built from the original magnitude rather than the freshly written real part
float2 temp = input[idx];
input[idx].x = temp.x * cosf(timeScale * temp.y);
input[idx].y = temp.x * sinf(timeScale * temp.y);
}
__global__ void cudaDivVec(float* input, float N, int scale) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > N) {
return;
}
input[idx] /= scale;
}
__global__ void cudaDivVec(float2* input, float* output, float N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > (int)N) {
return;
}
output[idx] = input[idx].x / N;
}
__global__ void padZeros(float* input, float2* output, int N, int zeros) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N + zeros) {
return;
}
// inclusive lower bound so that input[0] lands at output[zeros / 2]
if (idx >= zeros / 2 && idx < zeros / 2 + N) {
output[idx].x = input[idx - (zeros / 2)];
}
else {
output[idx].x = 0;
}
}
//#define DEBUGCUFFT
//#define DEBUGMAG
//#define DEBUGpad
//#define DEBUGwindow
namespace CudaPhase{
/// used by twod
using FFT::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
void pv_analysis(float2* output, float2* fft, float* input, float* intermediary, float* win, int N) {
cudaWindow << <1, N >> > (input, intermediary, win, N);
checkCUDAError_("Window analysis", __LINE__);
#ifdef DEBUGwindow
float *debug_arr;
cudaMallocManaged((void**)&debug_arr, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging input after WINDOW (malloc)", __LINE__);
cudaMemcpy(debug_arr,input, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging input after WINDOW (memcpy)", __LINE__);
printf("in\n");
printArraywNewLines(N, debug_arr);
cudaMemcpy(debug_arr,intermediary, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging intermediary after WINDOW (mempy)", __LINE__);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
cudaFree(debug_arr);
#endif
cufftShiftPadZeros<<<1, N/2>>>(output, intermediary, N, N);
checkCUDAError_("pad zero analysis", __LINE__);
#ifdef DEBUGpad
float2 *debug_arr1;
cudaMallocManaged((void**)&debug_arr1, sizeof(float2) *2 * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after cufftshift (malloc)", __LINE__);
cudaMemcpy(debug_arr1,output, sizeof(float2) *2 * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after cufftshift (memcpy)", __LINE__);
printf("out\n");
printArraywNewLines(2*N, debug_arr1);
cudaFree(debug_arr1);
#endif
FFT::HPFFT::computeGPUFFT(2*N, 2, output, fft);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGFFT
float2 *debug_arr2;
cudaMallocManaged((void**)&debug_arr2, sizeof(float2) * N, cudaMemAttachHost);
cudaMemcpy(debug_arr2,output, sizeof(float2) * N,cudaMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
cudaFree(debug_arr2);
#endif
cudaMagFreq << <1,2* N >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis kernel.cu", __LINE__);
}
void pv_analysis_RT(float2* output, float2* fft, float* input, float* intermediary, float* win, int N, cudaStream_t* stream) {
cudaWindow_HanRT<< <1, N , 0 , *stream>> > (input, intermediary, N);
checkCUDAError_("Window analysis", __LINE__);
#ifdef DEBUGwindow
float *debug_arr;
cudaMallocManaged((void**)&debug_arr, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging input after WINDOW (malloc)", __LINE__);
cudaMemcpy(debug_arr,input, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging input after WINDOW (memcpy)", __LINE__);
printf("in\n");
printArraywNewLines(N, debug_arr);
cudaMemcpy(debug_arr,intermediary, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging intermediary after WINDOW (mempy)", __LINE__);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
cudaFree(debug_arr);
#endif
cufftShiftPadZeros<<<1, N/2, 0, *stream>>>(output, intermediary, N, N);
checkCUDAError_("pad zero analysis", __LINE__);
#ifdef DEBUGpad
float2 *debug_arr1;
cudaMallocManaged((void**)&debug_arr1, sizeof(float2) *2 * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after cufftshift (malloc)", __LINE__);
cudaMemcpy(debug_arr1,output, sizeof(float2) *2 * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after cufftshift (memcpy)", __LINE__);
printf("out\n");
printArraywNewLines(2*N, debug_arr1);
cudaFree(debug_arr1);
#endif
FFT::HPFFT::computeGPUFFT_RT(2*N, 2, output, fft, stream);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGFFT
float2 *debug_arr2;
cudaMallocManaged((void**)&debug_arr2, sizeof(float2) * N, cudaMemAttachHost);
cudaMemcpy(debug_arr2,output, sizeof(float2) * N,cudaMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
cudaFree(debug_arr2);
#endif
cudaMagFreq << <1,2* N, 0,*stream >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis kernel.cu", __LINE__);
}
//#define DEBUGIFFT
void resynthesis(float* output, float* backFrame, float2* frontFrame, float2* intermediary, float* win, int N, int hopSize) {
cudaTimeScale << <1, 2* N >> > (frontFrame,2* N, 1);
FFT::HPFFT::computeGPUIFFT(2*N, 2, frontFrame, intermediary);
checkCUDAError_("ifft error");
#ifdef DEBUGIFFT
float2 *debug_arr2;
cudaMallocManaged((void**)&debug_arr2, sizeof(float2) * 2 * N, cudaMemAttachHost);
cudaMemcpy(debug_arr2,intermediary, sizeof(float2) * 2 * N,cudaMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(N, debug_arr2);
cudaFree(debug_arr2);
#endif
cudaDivVec << <1, N >> > ( intermediary,output, N);
checkCUDAError_("divvec error");
cufftShift<<<1,N/2>>>(output, N);
checkCUDAError_("shift error");
cudaWindow<<<1, N>>>(output, win,N);
checkCUDAError_("window error");
cudaOverlapAdd<<<1,N>>>(backFrame, output, N, hopSize);
checkCUDAError_("add error");
}
void test_overlap_add(float* input, float* output, float* intermediary, float* backFrame, float* win, int N, int hopSize){
cudaWindow<< <1,N >> > (input, intermediary, win, N);
checkCUDAError_("window error", __LINE__);
cufftShift<<<1, N/2>>>(output, intermediary, N);
checkCUDAError_("shift error", __LINE__);
cufftShift<<<1,N/2>>>(output, N);
checkCUDAError_("shift error", __LINE__);
cudaWindow<<<1, N>>>(output, win, N);
cudaOverlapAdd<<<1,N>>>(backFrame, output, N, hopSize);
}
void pv_analysis_CUFFT(float2* output, float2* fft, float* input, float* intermediary, float* win, int N) {
timer().startGpuTimer();
cudaWindow<< <1,N >> > (input, intermediary, win, N);
#ifdef DEBUGwindow
float *debug_arr;
cudaMallocManaged((void**)&debug_arr, sizeof(float) * N, cudaMemAttachHost);
cudaMemcpy(debug_arr,input, sizeof(float) * N,cudaMemcpyDeviceToHost);
printf("in\n");
printArraywNewLines(N, debug_arr);
cudaMemcpy(debug_arr,intermediary, sizeof(float) * N,cudaMemcpyDeviceToHost);
printf("intermediary\n");
printArraywNewLines(N, debug_arr);
cudaFree(debug_arr);
#endif
checkCUDAError_("Window analysis", __LINE__);
cufftShiftPadZeros<<<1, N/2>>>(output, intermediary, N, N);
#ifdef DEBUGpad
float2 *debug_arr1;
cudaMallocManaged((void**)&debug_arr1, sizeof(float2) * N, cudaMemAttachHost);
cudaMemcpy(debug_arr1,output, sizeof(float2) * N,cudaMemcpyDeviceToHost);
printf("out\n");
printArraywNewLines(N, debug_arr1);
cudaFree(debug_arr1);
#endif
checkCUDAError_("pad zero analysis", __LINE__);
cufftHandle plan;
cufftPlan1d(&plan, 2 * N, CUFFT_C2C, 1);
cufftExecC2C(plan, (cufftComplex *)output, (cufftComplex *)output, CUFFT_FORWARD);
checkCUDAError_("Cufft Error analysis", __LINE__);
#ifdef DEBUGCUFFT
float2 *debug_arr2;
cudaMallocManaged((void**)&debug_arr2, sizeof(float2) *2* N, cudaMemAttachHost);
cudaMemcpy(debug_arr2,output, sizeof(float2) *2* N,cudaMemcpyDeviceToHost);
printf("postcufft\n");
printArraywNewLines(2*N, debug_arr2);
cudaFree(debug_arr2);
#endif
cufftDestroy(plan);
cudaMagFreq << <1,2 * N >> > (output, 2*N);
checkCUDAError_("magfreq Error analysis", __LINE__);
#ifdef DEBUGMAG
float2 *debug_arr3;
cudaMallocManaged((void**)&debug_arr3, sizeof(float2) *2 * N, cudaMemAttachHost);
cudaMemcpy(debug_arr3,output, sizeof(float2) *2 * N,cudaMemcpyDeviceToHost);
printf("postMagnitude\n");
printArraywNewLines(2*N, debug_arr3);
cudaFree(debug_arr3);
#endif
timer().endGpuTimer();
}
//#define DEBUGTS
//#define DEBUGIFFT
//#define DEBUGSHIFTRE
void resynthesis_CUFFT(float* output, float* backFrame, float2* frontFrame, float* win,int N, int hopSize) {
timer().startGpuTimer();
cudaTimeScale << <1,2*N >> > (frontFrame,2* N, 1);
#ifdef DEBUGTS
float2 *debug_arr2;
cudaMallocManaged((void**)&debug_arr2, sizeof(float2) * 2 * N, cudaMemAttachHost);
cudaMemcpy(debug_arr2,frontFrame, sizeof(float2) * 2 * N,cudaMemcpyDeviceToHost);
printf("postTS\n");
printArraywNewLines(N, debug_arr2);
cudaFree(debug_arr2);
#endif
cufftHandle plan;
cufftPlan1d(&plan, N, CUFFT_C2R, 1);
checkCUDAError_("Cufft Plan IFFT Error", __LINE__);
cufftExecC2R(plan, (cufftComplex*)frontFrame, (cufftReal *)output);
checkCUDAError_("ifft error");
cufftDestroy(plan);
checkCUDAError_("cufftDestory error");
#ifdef DEBUGIFFT
float *debug_arr;
cudaMallocManaged((void**)&debug_arr, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
cudaMemcpy(debug_arr,output, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("CU IFFT\n");
printArraywNewLines(N, debug_arr);
cudaFree(debug_arr);
#endif
cudaDivVec << <1, N >> > (output, N, N);
checkCUDAError_("divvec error");
#ifdef DEBUGSCALE
float *debug_arr1;
cudaMallocManaged((void**)&debug_arr1, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
cudaMemcpy(debug_arr1, output, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("SCALE RE\n");
printArraywNewLines(N, debug_arr1);
cudaFree(debug_arr1);
#endif
cufftShift<<<1,N/2>>>(output, N);
checkCUDAError_("shift error");
#ifdef DEBUGSHIFTRE
float *debug_arr3;
cudaMallocManaged((void**)&debug_arr3, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
cudaMemcpy(debug_arr3, output, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("SHIFT RE\n");
printArraywNewLines(N, debug_arr3);
cudaFree(debug_arr3);
#endif
cudaWindow<<<1, N>>>(output, win, N);
checkCUDAError_("window error");
#ifdef DEBUGSHIFTRE
float *debug_arr4;
cudaMallocManaged((void**)&debug_arr4, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
cudaMemcpy(debug_arr4, output, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("WINDOW resynth\n");
printArraywNewLines(N, debug_arr4);
cudaFree(debug_arr4);
#endif
cudaOverlapAdd<<<1,N>>>(backFrame, output, N, hopSize);
checkCUDAError_("add error");
#ifdef DEBUGOADD
float *debug_arr5;
cudaMallocManaged((void**)&debug_arr5, sizeof(float) * N, cudaMemAttachHost);
checkCUDAError_("Error debugging output after ifft (malloc)", __LINE__);
cudaMemcpy(debug_arr5, output, sizeof(float) * N,cudaMemcpyDeviceToHost);
checkCUDAError_("Error debugging output after ifft (memcpy)", __LINE__);
printf("WINDOW resynth\n");
printArraywNewLines(N, debug_arr5);
cudaFree(debug_arr5);
#endif
timer().endGpuTimer();
}
} |
66c1abdec19cf4e7f1f6d5bf6ec8661f3747f63a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef key_size
#define key_size 8 /* assumed value: the kernel fills and prints exactly 8 key characters */
#endif
__global__
void _break(char *text,const int size)
{
char temp_key[key_size];
temp_key[0] = blockIdx.x/10 + 48;
temp_key[1] = blockIdx.x%10 + 48;
temp_key[2] = blockIdx.y/10 + 48;
temp_key[3] = blockIdx.y%10 + 48;
temp_key[4] = blockIdx.z + 48;
temp_key[5] = threadIdx.x + 48;
temp_key[6] = threadIdx.y + 48;
temp_key[7] = threadIdx.z + 48;
for(int i = 0; i < size; i++)
{
switch(text[i] ^ temp_key[i % key_size])
{
case '|':
case '~':
case '^':
case '*':
case '+':
case '_':
case '{':
case '}':
case '\\':
case '#':
return;
}
}
printf("%c%c%c%c%c%c%c%c\n",temp_key[0],temp_key[1],temp_key[2],temp_key[3],temp_key[4]
,temp_key[5],temp_key[6],temp_key[7]);
} | 66c1abdec19cf4e7f1f6d5bf6ec8661f3747f63a.cu | #ifndef key_size
#define key_size 8 /* assumed value: the kernel fills and prints exactly 8 key characters */
#endif
__global__
void _break(char *text,const int size)
{
char temp_key[key_size];
temp_key[0] = blockIdx.x/10 + 48;
temp_key[1] = blockIdx.x%10 + 48;
temp_key[2] = blockIdx.y/10 + 48;
temp_key[3] = blockIdx.y%10 + 48;
temp_key[4] = blockIdx.z + 48;
temp_key[5] = threadIdx.x + 48;
temp_key[6] = threadIdx.y + 48;
temp_key[7] = threadIdx.z + 48;
for(int i = 0; i < size; i++)
{
switch(text[i] ^ temp_key[i % key_size])
{
case '|':
case '~':
case '^':
case '*':
case '+':
case '_':
case '{':
case '}':
case '\\':
case '#':
return;
}
}
printf("%c%c%c%c%c%c%c%c\n",temp_key[0],temp_key[1],temp_key[2],temp_key[3],temp_key[4]
,temp_key[5],temp_key[6],temp_key[7]);
} |
912a2e44b4bd59fbe2461044b5ebe1caee1fb9e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void stencil_1d(int *in, int *out)
{
// __shared__ keyword to declare variables in shared block memory
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
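/*
* Halo example: with BLOCK_SIZE = 8 and RADIUS = 3 (illustrative values; the real
* ones come from includes.h), threads 0..2 of each block also fetch the 3 input
* elements just left of the tile and the 3 just past its right edge, so temp[]
* holds BLOCK_SIZE + 2 * RADIUS = 14 values before the stencil loop runs.
*/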
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
} | 912a2e44b4bd59fbe2461044b5ebe1caee1fb9e1.cu | #include "includes.h"
__global__ void stencil_1d(int *in, int *out)
{
// __shared__ keyword to declare variables in shared block memory
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
} |
fcd19837d69ff9f49b206961b34e106b83dac91d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/******************************************************************
void
transpose_write_coalease(a, b)
int *a;
int *b;
GPU kernel that transposes the ROW x COLUMN matrix a into the COLUMN x ROW
matrix b, one element per thread (launched below as <<<ROW, COLUMN>>>).
return: void
*/
#define ROW 3
#define COLUMN 4
__global__ void transpose_write_coalease(int * a, int *b){
int index = threadIdx.x + blockDim.x * blockIdx.x; /*get the absolute index of thread*/
int new_index = blockIdx.x + gridDim.x * threadIdx.x;
b[new_index] = a[index];
}
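/*
* Index mapping, worked example for the ROW = 3, COLUMN = 4 launch below: block 1,
* thread 2 reads a[1*4 + 2] = a[6] (row 1, column 2 of a) and writes
* b[1 + 3*2] = b[7] (row 2, column 1 of b). Consecutive threads read consecutive
* elements of a, so the reads coalesce, while the writes are strided by ROW.
*/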
void printMatrix(int* m, int a, int b) {
int i, j;
for (i = 0; i < a; i++) {
for (j = 0; j < b; j++) {
printf("%d\t", *(m + (i * b + j)));
}
printf("\n");
}
}
int main(void){
hipSetDevice(0);
/* Initialize */
int n = ROW * COLUMN;
int *a = (int *)malloc (n*sizeof(int));
int *b = (int *)malloc(n*sizeof(int));
for (int i = 0; i<n; i++){
a[i] = i;
b[i] = 0;
}
/* Display input */
printMatrix(a, ROW, COLUMN);
printf("\n");
/* Allocate memory in GPU */
int *dev_a;
int *dev_b;
hipMalloc((void **) &dev_a, n*sizeof(int));
hipMalloc((void **) &dev_b, n*sizeof(int));
hipMemcpy(dev_a, a, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n*sizeof(int), hipMemcpyHostToDevice);
/* Compute */
hipLaunchKernelGGL(( transpose_write_coalease), dim3(ROW), dim3(COLUMN), 0, 0, dev_a, dev_b);
/* Move result from GPU to CPU */
hipMemcpy(a, dev_a, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(b, dev_b, n*sizeof(int), hipMemcpyDeviceToHost);
/* Display result */
printMatrix(b, COLUMN, ROW);
/* Free the space occupied in both GPU and CPU */
free(a);
free(b);
hipFree(dev_a);
hipFree(dev_b);
}
| fcd19837d69ff9f49b206961b34e106b83dac91d.cu | #include <stdio.h>
#include <stdlib.h>
/******************************************************************
void
transpose_write_coalease(a, b)
int *a;
int *b;
GPU kernel that transposes the ROW x COLUMN matrix a into the COLUMN x ROW
matrix b, one element per thread (launched below as <<<ROW, COLUMN>>>).
return: void
*/
#define ROW 3
#define COLUMN 4
__global__ void transpose_write_coalease(int * a, int *b){
int index = threadIdx.x + blockDim.x * blockIdx.x; /*get the absolute index of thread*/
int new_index = blockIdx.x + gridDim.x * threadIdx.x;
b[new_index] = a[index];
}
void printMatrix(int* m, int a, int b) {
int i, j;
for (i = 0; i < a; i++) {
for (j = 0; j < b; j++) {
printf("%d\t", *(m + (i * b + j)));
}
printf("\n");
}
}
int main(void){
cudaSetDevice(0);
/* Initialize */
int n = ROW * COLUMN;
int *a = (int *)malloc (n*sizeof(int));
int *b = (int *)malloc(n*sizeof(int));
for (int i = 0; i<n; i++){
a[i] = i;
b[i] = 0;
}
/* Display input */
printMatrix(a, ROW, COLUMN);
printf("\n");
/* Allocate memory in GPU */
int *dev_a;
int *dev_b;
cudaMalloc((void **) &dev_a, n*sizeof(int));
cudaMalloc((void **) &dev_b, n*sizeof(int));
cudaMemcpy(dev_a, a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n*sizeof(int), cudaMemcpyHostToDevice);
/* Compute */
transpose_write_coalease<<<ROW, COLUMN>>>(dev_a, dev_b);
/* Move result from GPU to CPU */
cudaMemcpy(a, dev_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(b, dev_b, n*sizeof(int), cudaMemcpyDeviceToHost);
/* Display result */
printMatrix(b, COLUMN, ROW);
/* Free the space occupied in both GPU and CPU */
free(a);
free(b);
cudaFree(dev_a);
cudaFree(dev_b);
}
|
9b13621fadc0e7f378fa79f03dcad205f25b947c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "crop_layer.h"
#include "utils.h"
#include "hip/hip_runtime.h"
#include "image.h"
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0){
s = 0;
h = -1;
}else{
s = delta/max;
if(r == max){
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0){
r = v; g = t; b = p;
} else if(index == 1){
r = q; g = v; b = p;
} else if(index == 2){
r = p; g = v; b = t;
} else if(index == 3){
r = p; g = q; b = v;
} else if(index == 4){
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5) ? 1./saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5) ? 1./exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train){
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
angle = 2*angle*r7 - angle;
if(!train){
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265/180.;
float scale = 2;
float translate = -1;
if(layer.noadjust){
scale = 1;
translate = 0;
}
int size = layer.batch * layer.w * layer.h;
hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
check_error(hipPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
check_error(hipPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
| 9b13621fadc0e7f378fa79f03dcad205f25b947c.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "crop_layer.h"
#include "utils.h"
#include "cuda.h"
#include "image.h"
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0){
s = 0;
h = -1;
}else{
s = delta/max;
if(r == max){
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0){
r = v; g = t; b = p;
} else if(index == 1){
r = q; g = v; b = p;
} else if(index == 2){
r = p; g = v; b = t;
} else if(index == 3){
r = p; g = q; b = v;
} else if(index == 4){
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5) ? 1./saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5) ? 1./exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train){
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
angle = 2*angle*r7 - angle;
if(!train){
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265/180.;
float scale = 2;
float translate = -1;
if(layer.noadjust){
scale = 1;
translate = 0;
}
int size = layer.batch * layer.w * layer.h;
levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
check_error(cudaPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
check_error(cudaPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
|
4d1f2d3891b3138428588d65885ad7accbe3a81d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int DSIZE = 4096;
const int block_size = 16; // CUDA maximum is 1024 *total* threads in block
const float A_val = 1.0f;
const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int ds) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
if ((idx < ds) && (idy < ds)){
float temp = 0;
for (int i = 0; i < ds; i++)
temp += A[idx*ds+i] * B[i*ds+idy]; // dot product of row and column
C[idy*ds+idx] = temp;
}
}
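/*
* Worked check of the verification in main(): every element of A is A_val = 1.0f and
* every element of B is B_val = 2.0f, so each dot product above sums DSIZE identical
* terms and every C element equals A_val * B_val * DSIZE = 1.0 * 2.0 * 4096 = 8192,
* the value main() compares against.
*/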
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for (int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;}
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
cudaCheckErrors("hipMalloc failure");
hipMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
hipLaunchKernelGGL(( mmul), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, DSIZE);
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
hipMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), hipMemcpyDeviceToHost);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
for (int i = 0; i < DSIZE*DSIZE; i++) if (h_C[i] != A_val*B_val*DSIZE) {printf("mismatch at index %d, was: %f, should be: %f\n", i, h_C[i], A_val*B_val*DSIZE); return -1;}
printf("Success!\n");
return 0;
}
| 4d1f2d3891b3138428588d65885ad7accbe3a81d.cu | #include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int DSIZE = 4096;
const int block_size = 16; // CUDA maximum is 1024 *total* threads in block
const float A_val = 1.0f;
const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int ds) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
if ((idx < ds) && (idy < ds)){
float temp = 0;
for (int i = 0; i < ds; i++)
temp += A[idx*ds+i] * B[i*ds+idy]; // dot product of row and column
C[idy*ds+idx] = temp;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for (int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;}
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
cudaCheckErrors("cudaMalloc failure");
cudaMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
mmul<<<grid, block>>>(d_A, d_B, d_C, DSIZE);
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
cudaMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
for (int i = 0; i < DSIZE*DSIZE; i++) if (h_C[i] != A_val*B_val*DSIZE) {printf("mismatch at index %d, was: %f, should be: %f\n", i, h_C[i], A_val*B_val*DSIZE); return -1;}
printf("Success!\n");
return 0;
}
|
a4fcbe4d8c77d2b1976c035a78652cf18ab9acbb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <iostream>
#include <unordered_map>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include "constants.h"
#include "caster/caster_cuda.h"
using namespace std;
// initialize pos in Samples
// initialize num_components
__global__ void initializeSamples(int n, Sample *samples, float2 *positions,
short *sampleFreq) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
Sample sample;
sample.pos = positions[i];
sample.v = {0, 0};
sample.num_components = sampleFreq[i];
// FIXME - malloc can return NULL
sample.components =
(float2 *)malloc(sample.num_components * sizeof(float2));
samples[i] = sample;
}
}
__global__ void initializeDistances(int nDst, DistElem *distances,
short2 *dstIndexes, Sample *samples) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < nDst) {
DistElem dst = distances[i];
dst.comp1 = &samples[dst.i].components[dstIndexes[i].x];
dst.comp2 = &samples[dst.j].components[dstIndexes[i].y];
distances[i] = dst;
}
}
void CasterCuda::initializeHelperVectors() {
/*
* calculate number of distances for each sample and index of each distance
* for a given sample
*/
short *sampleFreq = new short[positions.size()];
for (unsigned i = 0; i < positions.size(); i++) {
sampleFreq[i] = 0;
}
short2 *dstIndexes = new short2[distances.size()];
for (unsigned i = 0; i < distances.size(); i++) {
dstIndexes[i] = {sampleFreq[distances[i].i]++,
sampleFreq[distances[i].j]++};
}
// initialize samples
short *d_sample_freq;
cuCall(hipMalloc(&d_sample_freq, positions.size() * sizeof(short)));
cuCall(hipMemcpy(d_sample_freq, sampleFreq, sizeof(short) * positions.size(),
hipMemcpyHostToDevice));
delete[] sampleFreq;
hipLaunchKernelGGL(( initializeSamples), dim3(positions.size() / 256 + 1), dim3(256), 0, 0,
positions.size(), d_samples, d_positions, d_sample_freq);
cuCall(hipFree(d_sample_freq));
// initialize comps in Distances in device memory
short2 *d_dst_indexes;
cuCall(hipMalloc(&d_dst_indexes, distances.size() * sizeof(short2)));
cuCall(hipMemcpy(d_dst_indexes, dstIndexes,
sizeof(short2) * distances.size(), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( initializeDistances), dim3(distances.size() / 256 + 1), dim3(256), 0, 0,
distances.size(), d_distances, d_dst_indexes, d_samples);
cuCall(hipFree(d_dst_indexes));
delete[] dstIndexes;
}
/*
* This function performs the preprocessing on the CPU that is optional
*
* Sorts samples by number of their distances and sorts distances by
* index i or j to utilize cache better. After sorting samples, their indexes
* change so we have to update distances once more
*/
void CasterCuda::sortHostSamples(vector<int> &labels) {
// create array of sorted indexes
vector<short> sampleFreq(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
sampleFreq[i] = 0;
}
// count how many distances touch each sample; the sort below orders samples by this
for (unsigned i = 0; i < distances.size(); i++) {
sampleFreq[distances[i].i]++;
sampleFreq[distances[i].j]++;
}
vector<int> sampleIndexes(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
sampleIndexes[i] = i;
}
sort(sampleIndexes.begin(), sampleIndexes.end(),
[&sampleFreq](const int &a, const int &b) -> bool {
if (sampleFreq[a] != sampleFreq[b]) {
return sampleFreq[a] < sampleFreq[b];
} else {
return a < b;
}
});
// create mapping index->new index
vector<int> newIndexes(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
newIndexes[sampleIndexes[i]] = i;
}
// sort positions
vector<float2> positionsCopy = positions;
vector<int> labelsCopy = labels;
for (unsigned i = 0; i < positions.size(); i++) {
positions[i] = positionsCopy[sampleIndexes[i]];
labels[i] = labelsCopy[sampleIndexes[i]];
}
// update indexes in distances
for (unsigned i = 0; i < distances.size(); i++) {
distances[i].i = newIndexes[distances[i].i];
distances[i].j = newIndexes[distances[i].j];
}
// sort distances
sort(distances.begin(), distances.end(),
[](const DistElem &a, const DistElem &b) -> bool {
if (a.i != b.i) {
return a.i < b.i;
} else {
return a.j <= b.j;
}
});
}
bool CasterCuda::allocateInitializeDeviceMemory() {
cuCall(hipMalloc(&d_positions, positions.size() * sizeof(float2)));
cuCall(hipMalloc(&d_samples, positions.size() * sizeof(Sample)));
cuCall(hipMalloc(&d_distances, distances.size() * sizeof(DistElem)));
cuCall(hipMalloc(&d_errors, distances.size() * sizeof(float)));
cuCall(hipMemcpy(d_positions, &positions[0],
sizeof(float2) * positions.size(), hipMemcpyHostToDevice));
cuCall(hipMemset(d_samples, 0, positions.size() * sizeof(Sample)));
cuCall(hipMemset(d_errors, 0, distances.size() * sizeof(float)));
cuCall(hipMemcpy(d_distances, &distances[0],
sizeof(DistElem) * distances.size(),
hipMemcpyHostToDevice));
return true;
}
__global__ void copyPosRelease(int N, Sample *samples, float2 *positions) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
positions[i] = samples[i].pos;
free(samples[i].components);
}
}
void CasterCuda::prepare(vector<int> &labels){
sortHostSamples(labels);
allocateInitializeDeviceMemory();
}
void CasterCuda::finish(){
copyResultsToHost();
}
__global__ void copyDevicePos(int N, Sample *samples, float2 *positions) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
positions[i] = samples[i].pos;
}
}
void CasterCuda::copyPositions() {
hipLaunchKernelGGL(( copyDevicePos), dim3(positions.size() / 256 + 1), dim3(256), 0, 0, positions.size(),
d_samples, d_positions);
cuCall(hipMemcpy(&positions[0], d_positions,
sizeof(float2) * positions.size(), hipMemcpyDeviceToHost));
}
bool CasterCuda::copyResultsToHost() {
hipLaunchKernelGGL(( copyPosRelease), dim3(positions.size() / 256 + 1), dim3(256), 0, 0, positions.size(),
d_samples, d_positions);
cuCall(hipMemcpy(&positions[0], d_positions,
sizeof(float2) * positions.size(), hipMemcpyDeviceToHost));
cuCall(hipFree(d_positions));
cuCall(hipFree(d_distances));
cuCall(hipFree(d_samples));
return true;
}
__global__ void calculateErrors(int dstNum, DistElem *distances, Sample *samples, float *errors) {
for (unsigned i = blockIdx.x * blockDim.x + threadIdx.x; i < dstNum;
i += blockDim.x * gridDim.x) {
DistElem dist = distances[i];
float d = dist.r;
float2 iPos = samples[dist.i].pos;
float2 jPos = samples[dist.j].pos;
float2 ij = {iPos.x - jPos.x, iPos.y - jPos.y};
errors[i] = fabs(d - sqrtf(ij.x * ij.x + ij.y * ij.y));
}
}
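/*
* calculateErrors uses a grid-stride loop: the fixed launch of 256 blocks x 256
* threads in getError() supplies 65536 threads, and each one walks the distance
* array in steps of blockDim.x * gridDim.x, so any dstNum is covered without
* retuning the launch configuration.
*/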
float CasterCuda::getError() {
hipLaunchKernelGGL(( calculateErrors), dim3(256), dim3(256), 0, 0, distances.size(), d_distances,
d_samples, d_errors);
thrust::device_ptr<float> err_ptr = thrust::device_pointer_cast(d_errors);
return thrust::reduce(err_ptr, err_ptr + distances.size(), 0.0, thrust::plus<float>());
}
void CasterCuda::simul_step() {
if (!it++) {
initializeHelperVectors();
}
simul_step_cuda();
if(it % 100 == 0) {
onError(getError());
}
if((itToPosReady--) == 0) {
onPositions(positions);
}
if(it % 2000 == 0) {
copyPositions();
itToPosReady = 5;
hipDeviceSynchronize();
}
};
| a4fcbe4d8c77d2b1976c035a78652cf18ab9acbb.cu | #include <cuda.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <iostream>
#include <unordered_map>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include "constants.h"
#include "caster/caster_cuda.h"
using namespace std;
// initialize pos in Samples
// initialize num_components
__global__ void initializeSamples(int n, Sample *samples, float2 *positions,
short *sampleFreq) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
Sample sample;
sample.pos = positions[i];
sample.v = {0, 0};
sample.num_components = sampleFreq[i];
// FIXME - malloc can return NULL
sample.components =
(float2 *)malloc(sample.num_components * sizeof(float2));
samples[i] = sample;
}
}
__global__ void initializeDistances(int nDst, DistElem *distances,
short2 *dstIndexes, Sample *samples) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < nDst) {
DistElem dst = distances[i];
dst.comp1 = &samples[dst.i].components[dstIndexes[i].x];
dst.comp2 = &samples[dst.j].components[dstIndexes[i].y];
distances[i] = dst;
}
}
void CasterCuda::initializeHelperVectors() {
/*
* calculate number of distances for each sample and index of each distance
* for a given sample
*/
short *sampleFreq = new short[positions.size()];
for (unsigned i = 0; i < positions.size(); i++) {
sampleFreq[i] = 0;
}
short2 *dstIndexes = new short2[distances.size()];
for (unsigned i = 0; i < distances.size(); i++) {
dstIndexes[i] = {sampleFreq[distances[i].i]++,
sampleFreq[distances[i].j]++};
}
// initialize samples
short *d_sample_freq;
cuCall(cudaMalloc(&d_sample_freq, positions.size() * sizeof(short)));
cuCall(cudaMemcpy(d_sample_freq, sampleFreq, sizeof(short) * positions.size(),
cudaMemcpyHostToDevice));
delete[] sampleFreq;
initializeSamples<<<positions.size() / 256 + 1, 256>>>(
positions.size(), d_samples, d_positions, d_sample_freq);
cuCall(cudaFree(d_sample_freq));
// initialize comps in Distances in device memory
short2 *d_dst_indexes;
cuCall(cudaMalloc(&d_dst_indexes, distances.size() * sizeof(short2)));
cuCall(cudaMemcpy(d_dst_indexes, dstIndexes,
sizeof(short2) * distances.size(), cudaMemcpyHostToDevice));
initializeDistances<<<distances.size() / 256 + 1, 256>>>(
distances.size(), d_distances, d_dst_indexes, d_samples);
cuCall(cudaFree(d_dst_indexes));
delete[] dstIndexes;
}
/*
* This function performs the preprocessing on the CPU that is optional
*
* Sorts samples by number of their distances and sorts distances by
* index i or j to utilize cache better. After sorting samples, their indexes
* change so we have to update distances once more
*/
void CasterCuda::sortHostSamples(vector<int> &labels) {
// create array of sorted indexes
vector<short> sampleFreq(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
sampleFreq[i] = 0;
}
// count how many distances touch each sample; the sort below orders samples by this
for (unsigned i = 0; i < distances.size(); i++) {
sampleFreq[distances[i].i]++;
sampleFreq[distances[i].j]++;
}
vector<int> sampleIndexes(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
sampleIndexes[i] = i;
}
sort(sampleIndexes.begin(), sampleIndexes.end(),
[&sampleFreq](const int &a, const int &b) -> bool {
if (sampleFreq[a] != sampleFreq[b]) {
return sampleFreq[a] < sampleFreq[b];
} else {
return a < b;
}
});
// create mapping index->new index
vector<int> newIndexes(positions.size());
for (unsigned i = 0; i < positions.size(); i++) {
newIndexes[sampleIndexes[i]] = i;
}
// sort positions
vector<float2> positionsCopy = positions;
vector<int> labelsCopy = labels;
for (unsigned i = 0; i < positions.size(); i++) {
positions[i] = positionsCopy[sampleIndexes[i]];
labels[i] = labelsCopy[sampleIndexes[i]];
}
// update indexes in distances
for (unsigned i = 0; i < distances.size(); i++) {
distances[i].i = newIndexes[distances[i].i];
distances[i].j = newIndexes[distances[i].j];
}
// sort distances
sort(distances.begin(), distances.end(),
[](const DistElem &a, const DistElem &b) -> bool {
if (a.i != b.i) {
return a.i < b.i;
} else {
return a.j <= b.j;
}
});
}
bool CasterCuda::allocateInitializeDeviceMemory() {
cuCall(cudaMalloc(&d_positions, positions.size() * sizeof(float2)));
cuCall(cudaMalloc(&d_samples, positions.size() * sizeof(Sample)));
cuCall(cudaMalloc(&d_distances, distances.size() * sizeof(DistElem)));
cuCall(cudaMalloc(&d_errors, distances.size() * sizeof(float)));
cuCall(cudaMemcpy(d_positions, &positions[0],
sizeof(float2) * positions.size(), cudaMemcpyHostToDevice));
cuCall(cudaMemset(d_samples, 0, positions.size() * sizeof(Sample)));
cuCall(cudaMemset(d_errors, 0, distances.size() * sizeof(float)));
cuCall(cudaMemcpy(d_distances, &distances[0],
sizeof(DistElem) * distances.size(),
cudaMemcpyHostToDevice));
return true;
}
__global__ void copyPosRelease(int N, Sample *samples, float2 *positions) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
positions[i] = samples[i].pos;
free(samples[i].components);
}
}
void CasterCuda::prepare(vector<int> &labels){
sortHostSamples(labels);
allocateInitializeDeviceMemory();
}
void CasterCuda::finish(){
copyResultsToHost();
}
__global__ void copyDevicePos(int N, Sample *samples, float2 *positions) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
positions[i] = samples[i].pos;
}
}
void CasterCuda::copyPositions() {
copyDevicePos<<<positions.size() / 256 + 1, 256>>>(positions.size(),
d_samples, d_positions);
cuCall(cudaMemcpy(&positions[0], d_positions,
sizeof(float2) * positions.size(), cudaMemcpyDeviceToHost));
}
bool CasterCuda::copyResultsToHost() {
copyPosRelease<<<positions.size() / 256 + 1, 256>>>(positions.size(),
d_samples, d_positions);
cuCall(cudaMemcpy(&positions[0], d_positions,
sizeof(float2) * positions.size(), cudaMemcpyDeviceToHost));
cuCall(cudaFree(d_positions));
cuCall(cudaFree(d_distances));
cuCall(cudaFree(d_samples));
return true;
}
__global__ void calculateErrors(int dstNum, DistElem *distances, Sample *samples, float *errors) {
for (unsigned i = blockIdx.x * blockDim.x + threadIdx.x; i < dstNum;
i += blockDim.x * gridDim.x) {
DistElem dist = distances[i];
float d = dist.r;
float2 iPos = samples[dist.i].pos;
float2 jPos = samples[dist.j].pos;
float2 ij = {iPos.x - jPos.x, iPos.y - jPos.y};
errors[i] = fabs(d - sqrtf(ij.x * ij.x + ij.y * ij.y));
}
}
float CasterCuda::getError() {
calculateErrors<<<256, 256>>>(distances.size(), d_distances,
d_samples, d_errors);
thrust::device_ptr<float> err_ptr = thrust::device_pointer_cast(d_errors);
return thrust::reduce(err_ptr, err_ptr + distances.size(), 0.0, thrust::plus<float>());
}
void CasterCuda::simul_step() {
if (!it++) {
initializeHelperVectors();
}
simul_step_cuda();
if(it % 100 == 0) {
onError(getError());
}
if((itToPosReady--) == 0) {
onPositions(positions);
}
if(it % 2000 == 0) {
copyPositions();
itToPosReady = 5;
cudaDeviceSynchronize();
}
};
|
cb1a381ea3fd5526ded380d0a17c63353958dd8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_fmaxf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_fmaxf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_fmaxf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_fmaxf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
hipDeviceSynchronize(); // wait for the 1000 timed launches to finish before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cb1a381ea3fd5526ded380d0a17c63353958dd8e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_fmaxf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_fmaxf<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_fmaxf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_fmaxf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
cudaDeviceSynchronize(); // wait for the 1000 timed launches to finish before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c7251286abb628b308cb1ab44419b902b54ff6ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*
* @author Ania Brown
* @author Tyson Jones
*/
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
/*
* struct types for concisely passing unitaries to kernels
*/
// hide these from doxygen
/// \cond HIDDEN_SYMBOLS
typedef struct ArgMatrix2 {
Complex r0c0, r0c1;
Complex r1c0, r1c1;
} ArgMatrix2;
typedef struct ArgMatrix4
{
Complex r0c0, r0c1, r0c2, r0c3;
Complex r1c0, r1c1, r1c2, r1c3;
Complex r2c0, r2c1, r2c2, r2c3;
Complex r3c0, r3c1, r3c2, r3c3;
} ArgMatrix4;
ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) {
ArgMatrix2 a;
a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0];
a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1];
a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0];
a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1];
return a;
}
ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) {
ArgMatrix4 a;
a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0];
a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1];
a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2];
a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3];
a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0];
a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1];
a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2];
a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3];
a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0];
a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1];
a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2];
a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3];
a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0];
a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1];
a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2];
a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3];
return a;
}
/// \endcond
/*
* in-kernel bit twiddling functions
*/
__forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) {
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
__forceinline__ __device__ int getBitMaskParity(long long int mask) {
int parity = 0;
while (mask) {
parity = !parity;
mask = mask & (mask-1);
}
return parity;
}
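/*
* Example: for mask = 0b1101 the loop clears the lowest set bit each pass
* (0b1101 -> 0b1100 -> 0b1000 -> 0), toggling parity three times, so the returned
* parity of three set bits is 1.
*/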
__forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) {
return (number ^ (1LL << bitInd));
}
__forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) {
long long int left, right;
left = (number >> index) << index;
right = number - left;
return (left << 1) ^ right;
}
__forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) {
int small = (bit1 < bit2)? bit1 : bit2;
int big = (bit1 < bit2)? bit2 : bit1;
return insertZeroBit(insertZeroBit(number, small), big);
}
__forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) {
/* inserted bit inds must strictly increase, so that their final indices are correct.
* in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already
* memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each
* at each insert. recall every element of inds (a positive or zero number) is unique.
* This function won't appear in the CPU code, which can use C99 variable-size arrays and
* ought to make a sorted array before threading
*/
int curMin = inds[0];
int prevMin = -1;
for (int n=0; n < numInds; n++) {
// find next min
for (int t=0; t < numInds; t++)
if (inds[t]>prevMin && inds[t]<curMin)
curMin = inds[t];
number = insertZeroBit(number, curMin);
// set curMin to an arbitrary non-visited elem
prevMin = curMin;
for (int t=0; t < numInds; t++)
if (inds[t] > curMin) {
curMin = inds[t];
break;
}
}
return number;
}
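/*
* Example of the single-bit primitive above: insertZeroBit(0b1011, 1) splits the
* input into left = 0b1010 and right = 0b1, shifts the left half up and recombines
* them as (left << 1) ^ right = 0b10101, i.e. a zero is inserted at bit 1 and the
* higher bits move up one place; insertTwoZeroBits and insertZeroBits just apply
* this in increasing index order.
*/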
/*
* state vector and density matrix operations
*/
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
hipDeviceSynchronize();
hipMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
hipMemcpyHostToDevice);
hipMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.imag)),
hipMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
hipDeviceSynchronize();
hipMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
hipMemcpyDeviceToDevice);
hipMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
hipMemcpyDeviceToDevice);
}
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
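/*
* The kernel above forms the outer product rho = |psi><psi| one column at a time:
* entry (row, col) of the density matrix is psi_row * conj(psi_col), which is the
* (realRow + i*imagRow)(realCol - i*imagCol) product written to flat index
* col*numPureAmps + row.
*/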
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
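/* Illustrative note on the indexing above (not part of the original source): the density
 * matrix is flattened with the column index scaled by densityDim (cf. the pure-state kernel
 * above), so the diagonal element |stateInd><stateInd| lives at (densityDim + 1)*stateInd.
 * E.g. for 2 qubits (densityDim = 4) and stateInd = 3, densityInd = 5*3 = 15, the last
 * element of the flattened 4x4 matrix.
 */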
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
    long long int numAmps = 1LL << numQubits;
    long long int numAmpsPerRank = numAmps/env.numRanks;
    qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
    if (env.numRanks>1){
        qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
    }
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
hipMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
hipMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
hipMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
hipMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
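/* Minimal usage sketch (illustrative only, relying on functions defined elsewhere in this
 * file):
 *
 *     QuESTEnv env = createQuESTEnv();
 *     Qureg qureg;
 *     statevec_createQureg(&qureg, 5, env);   // 2^5 amplitudes on host and device
 *     statevec_initZeroState(qureg);          // defined further below
 *     statevec_destroyQureg(qureg, env);
 *     destroyQuESTEnv(env);
 */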
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
hipFree(qureg.deviceStateVec.real);
hipFree(qureg.deviceStateVec.imag);
hipFree(qureg.firstLevelReduction);
hipFree(qureg.secondLevelReduction);
}
DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) {
DiagonalOp op;
op.numQubits = numQubits;
op.numElemsPerChunk = (1LL << numQubits) / env.numRanks;
op.chunkId = env.rank;
op.numChunks = env.numRanks;
// allocate CPU memory (initialised to zero)
op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
// @TODO no handling of rank>1 allocation (no distributed GPU)
// check cpu memory allocation was successful
if ( !op.real || !op.imag ) {
printf("Could not allocate memory!\n");
exit(EXIT_FAILURE);
}
// allocate GPU memory
size_t arrSize = op.numElemsPerChunk * sizeof(qreal);
hipMalloc(&(op.deviceOperator.real), arrSize);
hipMalloc(&(op.deviceOperator.imag), arrSize);
// check gpu memory allocation was successful
if (!op.deviceOperator.real || !op.deviceOperator.imag) {
printf("Could not allocate memory on GPU!\n");
exit(EXIT_FAILURE);
}
// initialise GPU memory to zero
hipMemset(op.deviceOperator.real, 0, arrSize);
hipMemset(op.deviceOperator.imag, 0, arrSize);
return op;
}
void agnostic_destroyDiagonalOp(DiagonalOp op) {
free(op.real);
free(op.imag);
hipFree(op.deviceOperator.real);
hipFree(op.deviceOperator.imag);
}
void agnostic_syncDiagonalOp(DiagonalOp op) {
hipDeviceSynchronize();
size_t mem_elems = op.numElemsPerChunk * sizeof *op.real;
hipMemcpy(op.deviceOperator.real, op.real, mem_elems, hipMemcpyHostToDevice);
hipMemcpy(op.deviceOperator.imag, op.imag, mem_elems, hipMemcpyHostToDevice);
}
__global__ void agnostic_initDiagonalOpFromPauliHamilKernel(
DiagonalOp op, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms
) {
// each thread processes one diagonal element
long long int elemInd = blockIdx.x*blockDim.x + threadIdx.x;
if (elemInd >= op.numElemsPerChunk)
return;
qreal elem = 0;
// elem is (+-) every coefficient, with sign determined by parity
for (int t=0; t<numSumTerms; t++) {
// determine the parity of the Z-targeted qubits in the element's corresponding state
int isOddNumOnes = 0;
for (int q=0; q<op.numQubits; q++)
if (pauliCodes[q + t*op.numQubits] == PAULI_Z)
if (extractBit(q, elemInd))
isOddNumOnes = !isOddNumOnes;
// avoid warp divergence
int sign = 1 - 2*isOddNumOnes; // (-1 if isOddNumOnes, else +1)
elem += termCoeffs[t] * sign;
}
op.deviceOperator.real[elemInd] = elem;
op.deviceOperator.imag[elemInd] = 0;
}
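/* Worked example of the sign trick above (illustrative, not from the original source): for
 * a single term c * Z0 Z2 on 3 qubits, the element at basis index 0b101 has two Z-targeted
 * qubits set, so isOddNumOnes = 0 and sign = 1 - 2*0 = +1; at index 0b100 only qubit 2 is
 * set, giving sign = -1. Writing sign = 1 - 2*isOddNumOnes replaces a branch with
 * arithmetic, keeping the warp convergent.
 */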
void agnostic_initDiagonalOpFromPauliHamil(DiagonalOp op, PauliHamil hamil) {
    // copy args into GPU memory
enum pauliOpType* d_pauliCodes;
size_t mem_pauliCodes = hamil.numSumTerms * op.numQubits * sizeof *d_pauliCodes;
hipMalloc(&d_pauliCodes, mem_pauliCodes);
hipMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, hipMemcpyHostToDevice);
qreal* d_termCoeffs;
size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs;
hipMalloc(&d_termCoeffs, mem_termCoeffs);
hipMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, hipMemcpyHostToDevice);
int numThreadsPerBlock = 128;
int numBlocks = ceil(op.numElemsPerChunk / (qreal) numThreadsPerBlock);
hipLaunchKernelGGL(( agnostic_initDiagonalOpFromPauliHamilKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0,
op, d_pauliCodes, d_termCoeffs, hamil.numSumTerms);
    // copy the populated operator back into RAM
hipDeviceSynchronize();
size_t mem_elems = op.numElemsPerChunk * sizeof *op.real;
hipMemcpy(op.real, op.deviceOperator.real, mem_elems, hipMemcpyDeviceToHost);
hipMemcpy(op.imag, op.deviceOperator.imag, mem_elems, hipMemcpyDeviceToHost);
hipFree(d_pauliCodes);
hipFree(d_termCoeffs);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct hipDeviceProp_t properties;
hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount);
if (cudaResultCode != hipSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
hipDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, char str[200]){
// OpenMP can be hybridised with GPU in future, so this check is safe and worthwhile
int ompStatus=0;
int numThreads=1;
# ifdef _OPENMP
ompStatus=1;
numThreads=omp_get_max_threads();
# endif
// there is no reporting of CUDA cores/threads/blocks currently (since non-trivial)
sprintf(str, "CUDA=1 OpenMP=%d MPI=0 threads=%d ranks=1", ompStatus, numThreads);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
hipMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice);
hipMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
hipDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
hipMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
hipMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
 For debugging purposes. Each rank should print its output serially. Only prints output for systems of <= 5 qubits.
 */
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the statevector to be all-zeros
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
void statevec_initBlankState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initBlankStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initDebugState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initDebugStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
__global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta);
}
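/* Usage sketch (illustrative; alpha and beta must satisfy |alpha|^2 + |beta|^2 = 1 for the
 * operation to be unitary), using the Complex struct fields accessed in the kernel:
 *
 *     Complex alpha = { .real = cos(0.3), .imag = 0.0 };
 *     Complex beta  = { .real = 0.0,      .imag = sin(0.3) };
 *     statevec_compactUnitary(qureg, 0, alpha, beta);   // rotate qubit 0
 */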
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_multiControlledMultiQubitUnitaryKernel(
Qureg qureg, long long int ctrlMask, int* targs, int numTargs,
qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps)
{
// decide the amplitudes this thread will modify
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes
if (thisTask>=numTasks) return;
// find this task's start index (where all targs are 0)
long long int ind00 = insertZeroBits(thisTask, targs, numTargs);
// this task only modifies amplitudes if control qubits are 1 for this state
if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
return;
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
    /*
    each thread needs:
        long long int ampInds[numTargAmps];
        qreal reAmps[numTargAmps];
        qreal imAmps[numTargAmps];
    but instead has access to global scratch arrays shared by all threads, addressed with the stride and offset below
    */
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine the indices and record values of target amps
long long int ind;
for (int i=0; i < numTargAmps; i++) {
// get global index of current target qubit assignment
ind = ind00;
for (int t=0; t < numTargs; t++)
if (extractBit(t, i))
ind = flipBit(ind, targs[t]);
ampInds[i*stride+offset] = ind;
reAmps [i*stride+offset] = reVec[ind];
imAmps [i*stride+offset] = imVec[ind];
}
// update the amplitudes
for (int r=0; r < numTargAmps; r++) {
ind = ampInds[r*stride+offset];
reVec[ind] = 0;
imVec[ind] = 0;
for (int c=0; c < numTargAmps; c++) {
qreal uReElem = uRe[c + r*numTargAmps];
qreal uImElem = uIm[c + r*numTargAmps];
reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem;
imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem;
}
}
}
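/* Illustrative note on the scratch layout above (not from the original source): with
 * stride = gridDim.x*blockDim.x global threads, the thread at `offset` stores its i-th
 * target amplitude at reAmps[i*stride + offset], i.e. the arrays are "amplitude-major" so
 * that neighbouring threads touch neighbouring addresses, at the cost of the
 * 2^numTargs * gridSize elements of device memory allocated by the host wrapper below.
 */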
void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u)
{
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock);
// allocate device space for global {targs} (length: numTargs) and populate
int *d_targs;
size_t targMemSize = numTargs * sizeof *d_targs;
hipMalloc(&d_targs, targMemSize);
hipMemcpy(d_targs, targs, targMemSize, hipMemcpyHostToDevice);
// flatten out the u.real and u.imag lists
int uNumRows = (1 << u.numQubits);
qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat);
qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat);
long long int i = 0;
for (int r=0; r < uNumRows; r++)
for (int c=0; c < uNumRows; c++) {
uReFlat[i] = u.real[r][c];
uImFlat[i] = u.imag[r][c];
i++;
}
// allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate
qreal* d_uRe;
qreal* d_uIm;
size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm
hipMalloc(&d_uRe, uMemSize);
hipMalloc(&d_uIm, uMemSize);
hipMemcpy(d_uRe, uReFlat, uMemSize, hipMemcpyHostToDevice);
hipMemcpy(d_uIm, uImFlat, uMemSize, hipMemcpyHostToDevice);
    // allocate device workspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs)
long long int *d_ampInds;
qreal *d_reAmps;
qreal *d_imAmps;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
int numTargAmps = uNumRows;
hipMalloc(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds);
hipMalloc(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps);
hipMalloc(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps);
// call kernel
hipLaunchKernelGGL(( statevec_multiControlledMultiQubitUnitaryKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0,
qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps);
// free kernel memory
free(uReFlat);
free(uImFlat);
hipFree(d_targs);
hipFree(d_uRe);
hipFree(d_uIm);
hipFree(d_ampInds);
hipFree(d_reAmps);
hipFree(d_imAmps);
}
__global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){
// decide the 4 amplitudes this thread will modify
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes
if (thisTask>=numTasks) return;
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
// find indices of amplitudes to modify (treat q1 as the least significant bit)
long long int ind00, ind01, ind10, ind11;
ind00 = insertTwoZeroBits(thisTask, q1, q2);
// modify only if control qubits are 1 for this state
if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
return;
ind01 = flipBit(ind00, q1);
ind10 = flipBit(ind00, q2);
ind11 = flipBit(ind01, q2);
// extract statevec amplitudes
qreal re00, re01, re10, re11;
qreal im00, im01, im10, im11;
re00 = reVec[ind00]; im00 = imVec[ind00];
re01 = reVec[ind01]; im01 = imVec[ind01];
re10 = reVec[ind10]; im10 = imVec[ind10];
re11 = reVec[ind11]; im11 = imVec[ind11];
// apply u * {amp00, amp01, amp10, amp11}
reVec[ind00] =
u.r0c0.real*re00 - u.r0c0.imag*im00 +
u.r0c1.real*re01 - u.r0c1.imag*im01 +
u.r0c2.real*re10 - u.r0c2.imag*im10 +
u.r0c3.real*re11 - u.r0c3.imag*im11;
imVec[ind00] =
u.r0c0.imag*re00 + u.r0c0.real*im00 +
u.r0c1.imag*re01 + u.r0c1.real*im01 +
u.r0c2.imag*re10 + u.r0c2.real*im10 +
u.r0c3.imag*re11 + u.r0c3.real*im11;
reVec[ind01] =
u.r1c0.real*re00 - u.r1c0.imag*im00 +
u.r1c1.real*re01 - u.r1c1.imag*im01 +
u.r1c2.real*re10 - u.r1c2.imag*im10 +
u.r1c3.real*re11 - u.r1c3.imag*im11;
imVec[ind01] =
u.r1c0.imag*re00 + u.r1c0.real*im00 +
u.r1c1.imag*re01 + u.r1c1.real*im01 +
u.r1c2.imag*re10 + u.r1c2.real*im10 +
u.r1c3.imag*re11 + u.r1c3.real*im11;
reVec[ind10] =
u.r2c0.real*re00 - u.r2c0.imag*im00 +
u.r2c1.real*re01 - u.r2c1.imag*im01 +
u.r2c2.real*re10 - u.r2c2.imag*im10 +
u.r2c3.real*re11 - u.r2c3.imag*im11;
imVec[ind10] =
u.r2c0.imag*re00 + u.r2c0.real*im00 +
u.r2c1.imag*re01 + u.r2c1.real*im01 +
u.r2c2.imag*re10 + u.r2c2.real*im10 +
u.r2c3.imag*re11 + u.r2c3.real*im11;
reVec[ind11] =
u.r3c0.real*re00 - u.r3c0.imag*im00 +
u.r3c1.real*re01 - u.r3c1.imag*im01 +
u.r3c2.real*re10 - u.r3c2.imag*im10 +
u.r3c3.real*re11 - u.r3c3.imag*im11;
imVec[ind11] =
u.r3c0.imag*re00 + u.r3c0.real*im00 +
u.r3c1.imag*re01 + u.r3c1.real*im01 +
u.r3c2.imag*re10 + u.r3c2.real*im10 +
u.r3c3.imag*re11 + u.r3c3.real*im11;
}
void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u)
{
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes
hipLaunchKernelGGL(( statevec_multiControlledTwoQubitUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, q1, q2, argifyMatrix4(u));
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_multiControlledUnitaryKernel(
Qureg qureg,
long long int ctrlQubitsMask, long long int ctrlFlipMask,
int targetQubit, ArgMatrix2 u
){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) {
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(
Qureg qureg,
long long int ctrlQubitsMask, long long int ctrlFlipMask,
int targetQubit, ComplexMatrix2 u
){
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u));
}
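/* Illustrative example of the mask test in the kernel above (not from the original source):
 * with control qubits {0, 2} where qubit 2 is an "anti-control" required to be 0,
 * ctrlQubitsMask = 0b101 and ctrlFlipMask = 0b100. For indexUp = 0b001,
 * indexUp ^ ctrlFlipMask = 0b101, and masking with ctrlQubitsMask reproduces ctrlQubitsMask,
 * so the unitary is applied; for indexUp = 0b100 the test fails and the amplitude pair is
 * left untouched.
 */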
__global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask = getQubitBitMask(controlQubits, numControlQubits);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle);
}
__global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
long long int stateVecSize = qureg.numAmpsPerChunk;
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
int fac = getBitMaskParity(mask & index)? -1 : 1;
qreal stateReal = stateVecReal[index];
qreal stateImag = stateVecImag[index];
stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag;
stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag;
}
void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle)
{
qreal cosAngle = cos(angle/2.0);
qreal sinAngle = sin(angle/2.0);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiRotateZKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle);
}
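/* Illustrative note (not from the original source): the kernel above effectively applies
 * exp(-i angle/2 * Z x ... x Z) on the qubits selected by `mask`. The factor fac is the
 * parity of the masked bits of the basis index, so e.g. for mask = 0b11 the amplitude of
 * |01> (odd parity) acquires the phase exp(+i angle/2), while |00> and |11> (even parity)
 * acquire exp(-i angle/2).
 */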
__global__ void statevec_multiControlledMultiRotateZKernel(Qureg qureg, long long int ctrlMask, long long int targMask, qreal cosAngle, qreal sinAngle) {
long long int stateVecSize = qureg.numAmpsPerChunk;
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
    // amplitudes whose control qubits are not all in state 1 are left unmodified
if (ctrlMask && ((ctrlMask & index) != ctrlMask))
return;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
// avoid warp divergence, setting fac = +- 1
int fac = 1-2*getBitMaskParity(targMask & index);
qreal stateReal = stateVecReal[index];
qreal stateImag = stateVecImag[index];
stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag;
stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag;
}
void statevec_multiControlledMultiRotateZ(Qureg qureg, long long int ctrlMask, long long int targMask, qreal angle)
{
qreal cosAngle = cos(angle/2.0);
qreal sinAngle = sin(angle/2.0);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledMultiRotateZKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, targMask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
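/* Illustrative note on the Kahan compensation above (not from the original source): a plain
 * accumulation of many addends far smaller than the running total loses them entirely,
 * whereas carrying the compensation term c recovers them, e.g.
 *
 *     qreal sum = 1.0, c = 0.0, y, t;
 *     for (int i = 0; i < 1000000; i++) {
 *         y = 1e-16 - c;  t = sum + y;  c = (t - sum) - y;  sum = t;
 *     }
 *
 * ends near 1.0000000001 in double precision, while the naive loop `sum += 1e-16` stays at
 * exactly 1.0. The same pattern is applied separately to the real and imaginary
 * contributions above.
 */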
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask = getQubitBitMask(controlQubits, numControlQubits);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask);
}
__global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) {
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int ind00, ind01, ind10;
qreal re01, re10, im01, im10;
// determine ind00 of |..0..0..>, |..0..1..> and |..1..0..>
ind00 = insertTwoZeroBits(thisTask, qb1, qb2);
ind01 = flipBit(ind00, qb1);
ind10 = flipBit(ind00, qb2);
// extract statevec amplitudes
re01 = reVec[ind01]; im01 = imVec[ind01];
re10 = reVec[ind10]; im10 = imVec[ind10];
// swap 01 and 10 amps
reVec[ind01] = re10; reVec[ind10] = re01;
imVec[ind01] = im10; imVec[ind10] = im01;
}
void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_swapQubitAmpsKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, qb1, qb2);
}
__global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit);
}
__global__ void statevec_multiControlledMultiQubitNotKernel(Qureg qureg, int ctrlMask, int targMask) {
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
    // although each thread swaps/updates two amplitudes, we still invoke one thread per amp
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= qureg.numAmpsPerChunk)
return;
// modify amplitudes only if control qubits are 1 for this state
if (ctrlMask && ((ctrlMask & ampInd) != ctrlMask))
return;
long long int mateInd = ampInd ^ targMask;
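// e.g. (hypothetical masks) targMask = 0b101 pairs amplitude 0b010 with amplitude 0b111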
// if the mate is lower index, another thread is handling it
if (mateInd < ampInd)
return;
/* it may seem wasteful to spawn more threads than are needed, and abort
* half of them due to the amp pairing above (and potentially abort
* an exponential number due to ctrlMask). however, since we are moving
* global memory directly in a potentially non-contiguous fashion, this
* method is likely to be memory bandwidth bottlenecked anyway
*/
qreal mateRe = stateRe[mateInd];
qreal mateIm = stateIm[mateInd];
// swap amp with mate
stateRe[mateInd] = stateRe[ampInd];
stateIm[mateInd] = stateIm[ampInd];
stateRe[ampInd] = mateRe;
stateIm[ampInd] = mateIm;
}
void statevec_multiControlledMultiQubitNot(Qureg qureg, int ctrlMask, int targMask) {
int numThreadsPerBlock = 128;
int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);
hipLaunchKernelGGL(( statevec_multiControlledMultiQubitNotKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, qureg, ctrlMask, targMask);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
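/* worked example of the tree reduction above, with placeholder values a..h:
 * for length = 8, threadMax starts at 4 and maxDepth = 2, giving three passes:
 *   pass 0: [a,b,c,d,e,f,g,h] -> [a+e, b+f, c+g, d+h, ...]
 *   pass 1:                   -> [a+e+c+g, b+f+d+h, ...]
 *   pass 2:                   -> [a+b+c+d+e+f+g+h, ...]
 * thread 0 then writes element 0 into reducedArray[blockIdx.x]
 */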
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density matrix prob that this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
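// e.g. for 2 qubits (densityDim = 4), basisIndex 0..3 maps to flat indices 0, 5, 10, 15 -- the diagonal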
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return zeroProb;
}
qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return stateProb;
}
qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
// atomicAdd on doubles isn't available on devices of compute capability < 6.0, so we define it ourselves
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__global__ void statevec_calcProbOfAllOutcomesKernel(
qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
// each thread handles one amplitude (all amplitudes are involved)
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= qureg.numAmpsTotal) return;
qreal prob = (
qureg.deviceStateVec.real[ampInd]*qureg.deviceStateVec.real[ampInd] +
qureg.deviceStateVec.imag[ampInd]*qureg.deviceStateVec.imag[ampInd]);
// each amplitude contributes to one outcome
long long int outcomeInd = 0;
for (int q=0; q<numQubits; q++)
outcomeInd += extractBit(qubits[q], ampInd) * (1LL << q);
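// e.g. (hypothetical) qubits = {3, 1}: an amplitude with bit 3 set and bit 1 clear adds its probability to outcomeProbs[1]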
// each thread atomically writes directly to the global output.
// this beat block-hierarchical atomic reductions in both global and shared memory!
atomicAdd(&outcomeProbs[outcomeInd], prob);
}
void statevec_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {
// copy qubits to GPU memory
int* d_qubits;
size_t mem_qubits = numQubits * sizeof *d_qubits;
hipMalloc(&d_qubits, mem_qubits);
hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice);
// create one thread for every amplitude
int numThreadsPerBlock = 128;
int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);
// create global GPU array for outcomeProbs
qreal* d_outcomeProbs;
long long int numOutcomes = (1LL << numQubits);
size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
hipMalloc(&d_outcomeProbs, mem_outcomeProbs);
hipMemset(d_outcomeProbs, 0, mem_outcomeProbs);
// populate per-block subarrays
hipLaunchKernelGGL(( statevec_calcProbOfAllOutcomesKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0,
d_outcomeProbs, qureg, d_qubits, numQubits);
// copy outcomeProbs from GPU memory
hipMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, hipMemcpyDeviceToHost);
// free GPU memory
hipFree(d_qubits);
hipFree(d_outcomeProbs);
}
__global__ void densmatr_calcProbOfAllOutcomesKernel(
qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
// each thread handles one diagonal amplitude
long long int diagInd = blockIdx.x*blockDim.x + threadIdx.x;
long long int numDiags = (1LL << qureg.numQubitsRepresented);
if (diagInd >= numDiags) return;
long long int flatInd = (1 + numDiags)*diagInd;
qreal prob = qureg.deviceStateVec.real[flatInd]; // im[flatInd] assumed ~ 0
// each diagonal amplitude contributes to one outcome
long long int outcomeInd = 0;
for (int q=0; q<numQubits; q++)
outcomeInd += extractBit(qubits[q], diagInd) * (1LL << q);
// each thread atomically writes directly to the global output.
// this beat block-hierarchical atomic reductions in both global and shared memory!
atomicAdd(&outcomeProbs[outcomeInd], prob);
}
void densmatr_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {
// copy qubits to GPU memory
int* d_qubits;
size_t mem_qubits = numQubits * sizeof *d_qubits;
hipMalloc(&d_qubits, mem_qubits);
hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice);
// create global array, with per-block subarrays
int numThreadsPerBlock = 128;
int numDiags = (1LL << qureg.numQubitsRepresented);
int numBlocks = ceil(numDiags / (qreal) numThreadsPerBlock);
// create global GPU array for outcomeProbs
qreal* d_outcomeProbs;
long long int numOutcomes = (1LL << numQubits);
size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
hipMalloc(&d_outcomeProbs, mem_outcomeProbs);
hipMemset(d_outcomeProbs, 0, mem_outcomeProbs);
// populate per-block subarrays
hipLaunchKernelGGL(( densmatr_calcProbOfAllOutcomesKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0,
d_outcomeProbs, qureg, d_qubits, numQubits);
// copy outcomeProbs from GPU memory
hipMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, hipMemcpyDeviceToHost);
// free GPU memory
hipFree(d_qubits);
hipFree(d_outcomeProbs);
}
/** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */
__global__ void densmatr_calcInnerProductKernel(
Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm
qreal prod = (
a.deviceStateVec.real[index]*b.deviceStateVec.real[index]
+ a.deviceStateVec.imag[index]*b.deviceStateVec.imag[index]);
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = prod;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
qreal densmatr_calcInnerProduct(Qureg a, Qureg b) {
// we're summing one conjugate-product term per amplitude of the density matrices
long long int numValuesToReduce = a.numAmpsTotal;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the terms in each block
// arbitrarily store the reduction in the b qureg's array
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
a, b, a.numAmpsTotal, b.firstLevelReduction);
firstTime = 0;
}
// sum the block terms
else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
b.firstLevelReduction,
b.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal innerprod;
hipMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return innerprod;
}
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @todo could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
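// reducing termReal over all rows yields Re( <vec| dens |vec> ), i.e. the fidelity of dens against the pure state vec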
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
// we're summing one fidelity term per row of the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
hipMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return fidelity;
}
__global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel(
qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm,
long long int numAmpsToSum, qreal *reducedArray
) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
// compute this thread's sum term
qreal difRe = aRe[index] - bRe[index];
qreal difIm = aIm[index] - bIm[index];
qreal term = difRe*difRe + difIm*difIm;
// array of each thread's collected term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/* computes sqrt(Tr( (a-b) conjTrans(a-b) )) = sqrt( sum of squared abs vals of (a-b) ) */
qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) {
// we're summing the squared magnitude of every term in (a-b)
long long int numValuesToReduce = a.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block (store reduction temp values in a's reduction array)
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcHilbertSchmidtDistanceSquaredKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
a.deviceStateVec.real, a.deviceStateVec.imag,
b.deviceStateVec.real, b.deviceStateVec.imag,
numValuesToReduce, a.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
a.firstLevelReduction,
a.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal trace;
hipMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
qreal sqrtTrace = sqrt(trace);
return sqrtTrace;
}
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return traceDensSquared;
}
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
// (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb);
}
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
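// the masks re-insert two zero bits at the colBit and rowBit positions; e.g. (hypothetical) for a
// 2-qubit density matrix with measureQubit = 0 (colBit = 1, rowBit = 4), scanInd = 0..3 maps to base = 0, 2, 8, 10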
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
int colBit = 1LL << measureQubit;
int rowBit = 1LL << rowQubit;
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
__global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_mixDephasingKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) {
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) {
if (dephase == 0)
return;
qreal dephFac = 1 - dephase;
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac);
}
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_mixTwoQubitDephasingKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
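// the skipped meta values (0, 5, 10 and 15) have D==B and C==A, i.e. they address elements
// diagonal in the two target qubits, which dephasing leaves unchanged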
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixTwoQubitDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_mixDepolarisingKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
/** Works like mixDephasing but also transfers a fraction of each pair's |1><1| population into its |0><0| element */
__global__ void densmatr_mixDampingKernel(
qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = damping * ( vecReal[targetInd]);
qreal imagAvDepol = damping * ( vecImag[targetInd]);
vecReal[targetInd] *= 1 - damping;
vecImag[targetInd] *= 1 - damping;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
}
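// note: the kernel above moves a fraction `damping` of the |1><1| population (targetInd) into |0><0| (baseInd);
// the off-diagonal |0><1| and |1><0| elements are instead scaled by sqrt(1 - damping) in densmatr_mixDamping below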
void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_mixDephasing(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) {
if (damping == 0)
return;
qreal dephase = sqrt(1-damping);
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixDampingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
/** Called once for every 16 amplitudes */
__global__ void densmatr_mixTwoQubitDepolarisingKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
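// ind00..ind11 address the four elements that are diagonal in the two target qubits; their populations are mixed towards their average below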
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_mixTwoQubitDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
__global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
if (ampInd >= numAmpsToVisit) return;
qreal *vecRe1 = qureg1.deviceStateVec.real;
qreal *vecIm1 = qureg1.deviceStateVec.imag;
qreal *vecRe2 = qureg2.deviceStateVec.real;
qreal *vecIm2 = qureg2.deviceStateVec.imag;
qreal *vecReOut = out.deviceStateVec.real;
qreal *vecImOut = out.deviceStateVec.imag;
qreal facRe1 = fac1.real;
qreal facIm1 = fac1.imag;
qreal facRe2 = fac2.real;
qreal facIm2 = fac2.imag;
qreal facReOut = facOut.real;
qreal facImOut = facOut.imag;
qreal re1,im1, re2,im2, reOut,imOut;
long long int index = ampInd;
re1 = vecRe1[index]; im1 = vecIm1[index];
re2 = vecRe2[index]; im2 = vecIm2[index];
reOut = vecReOut[index];
imOut = vecImOut[index];
vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2);
vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2);
}
void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_setWeightedQuregKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
fac1, qureg1, fac2, qureg2, facOut, out
);
}
__global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
// each thread modifies one value; a wasteful and inefficient strategy
long long int numTasks = qureg.numAmpsPerChunk;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask >= numTasks) return;
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
qreal* opRe = op.deviceOperator.real;
qreal* opIm = op.deviceOperator.imag;
qreal a = stateRe[thisTask];
qreal b = stateIm[thisTask];
qreal c = opRe[thisTask];
qreal d = opIm[thisTask];
// (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
stateRe[thisTask] = a*c - b*d;
stateIm[thisTask] = a*d + b*c;
}
void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op);
}
__global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
// each thread modifies one value; a wasteful and inefficient strategy
long long int numTasks = qureg.numAmpsPerChunk;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask >= numTasks) return;
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
qreal* opRe = op.deviceOperator.real;
qreal* opIm = op.deviceOperator.imag;
int opDim = (1 << op.numQubits);
qreal a = stateRe[thisTask];
qreal b = stateIm[thisTask];
qreal c = opRe[thisTask % opDim];
qreal d = opIm[thisTask % opDim];
// (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
stateRe[thisTask] = a*c - b*d;
stateIm[thisTask] = a*d + b*c;
}
void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) {
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op);
}
/** computes either a real or imag term of |vec_i|^2 op_i */
__global__ void statevec_calcExpecDiagonalOpKernel(
int getRealComp,
qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// choose whether to calculate the real or imaginary term of the expec term
qreal expecVal;
if (getRealComp)
expecVal = vecAbs * opReal[index];
else
expecVal = vecAbs * opImag[index];
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = expecVal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
/* @TODO: remove all this reduction boilerplate from QuEST GPU
* (e.g. a func which accepts a pointer to do every-value reduction?)
*/
qreal expecReal, expecImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// return complex
Complex expecVal;
expecVal.real = expecReal;
expecVal.imag = expecImag;
return expecVal;
}
__global__ void densmatr_calcExpecDiagonalOpKernel(
int getRealComp,
qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag,
int numQubits, long long int numTermsToSum, qreal* reducedArray)
{
/** if the thread represents a diagonal op, then it computes either a
* real or imag term of matr_{ii} op_i. Otherwise, it writes a 0 to the
* reduction array
*/
// matInd indexes a density-matrix element; only the 2^Q diagonal elements contribute to the sum
long long int matInd = blockIdx.x*blockDim.x + threadIdx.x;
if (matInd >= numTermsToSum) return;
long long int diagSpacing = (1LL << numQubits) + 1LL;
int isDiag = ((matInd % diagSpacing) == 0);
long long int opInd = matInd / diagSpacing;
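// e.g. for numQubits = 2, diagSpacing = 5: the diagonal sits at flat indices 0, 5, 10, 15, mapping to opInd = 0, 1, 2, 3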
qreal val = 0;
if (isDiag) {
qreal matRe = matReal[matInd];
qreal matIm = matImag[matInd];
qreal opRe = opReal[opInd];
qreal opIm = opImag[opInd];
// (matRe + matIm i)(opRe + opIm i) =
// (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe)
if (getRealComp)
val = matRe * opRe - matIm * opIm;
else
val = matRe * opIm + matIm * opRe;
}
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = val;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
/* @TODO: remove all this reduction boilerplate from QuEST GPU
* (e.g. a func which accepts a pointer to do every-value reduction?)
*/
qreal expecReal, expecImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
op.numQubits, numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
op.numQubits, numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// return complex
Complex expecVal;
expecVal.real = expecReal;
expecVal.imag = expecImag;
return expecVal;
}
void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) {
// update both RAM and VRAM, for consistency
memcpy(&op.real[startInd], real, numElems * sizeof(qreal));
memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal));
hipDeviceSynchronize();
hipMemcpy(
op.deviceOperator.real + startInd,
real,
numElems * sizeof(*(op.deviceOperator.real)),
hipMemcpyHostToDevice);
hipMemcpy(
op.deviceOperator.imag + startInd,
imag,
numElems * sizeof(*(op.deviceOperator.imag)),
hipMemcpyHostToDevice);
}
__global__ void statevec_applyPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int numTerms,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
// determine phase index of {qubits}
long long int phaseInd = 0LL;
if (encoding == UNSIGNED) {
for (int q=0; q<numQubits; q++)
phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
}
else if (encoding == TWOS_COMPLEMENT) {
for (int q=0; q<numQubits-1; q++) // use final qubit to indicate sign
phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
if (extractBit(qubits[numQubits-1], globalAmpInd) == 1)
phaseInd -= (1LL << (numQubits-1));
}
    // determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++)
if (phaseInd == overrideInds[i])
break;
    // determine phase from {coeffs}, {exponents} (unless overridden)
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else
for (int t=0; t<numTerms; t++)
phase += coeffs[t] * pow(phaseInd, exponents[t]);
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyPhaseFuncOverrides(
Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int numTerms,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// allocate device space for global list of {qubits}, {coeffs}, {exponents}, {overrideInds} and {overridePhases}
int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits;
qreal* d_coeffs; size_t mem_terms = numTerms * sizeof *d_coeffs;
qreal* d_exponents;
long long int* d_overrideInds; size_t mem_inds = numOverrides * sizeof *d_overrideInds;
qreal* d_overridePhases; size_t mem_phas = numOverrides * sizeof *d_overridePhases;
hipMalloc(&d_qubits, mem_qubits); hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice);
hipMalloc(&d_coeffs, mem_terms); hipMemcpy(d_coeffs, coeffs, mem_terms, hipMemcpyHostToDevice);
hipMalloc(&d_exponents, mem_terms); hipMemcpy(d_exponents, exponents, mem_terms, hipMemcpyHostToDevice);
hipMalloc(&d_overrideInds, mem_inds); hipMemcpy(d_overrideInds, overrideInds, mem_inds, hipMemcpyHostToDevice);
hipMalloc(&d_overridePhases,mem_phas); hipMemcpy(d_overridePhases, overridePhases, mem_phas, hipMemcpyHostToDevice);
// call kernel
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_applyPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0,
qureg, d_qubits, numQubits, encoding,
d_coeffs, d_exponents, numTerms,
d_overrideInds, d_overridePhases, numOverrides,
conj);
// cleanup device memory
hipFree(d_qubits);
hipFree(d_coeffs);
hipFree(d_exponents);
hipFree(d_overrideInds);
hipFree(d_overridePhases);
}
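/* Hypothetical usage sketch (not taken from QuEST's own tests; assumes qureg has at least 3 qubits):
 * applying the single-variable phase f(r) = 0.5 * r^2 over qubits {0,1,2}, with the phase at
 * index 0 explicitly overridden to 0, might look like
 *     int qubits[] = {0, 1, 2};
 *     qreal coeffs[] = {0.5};
 *     qreal exponents[] = {2};
 *     long long int overrideInds[] = {0};
 *     qreal overridePhases[] = {0};
 *     statevec_applyPhaseFuncOverrides(qureg, qubits, 3, UNSIGNED, coeffs, exponents, 1,
 *         overrideInds, overridePhases, 1, 0);
 */
/** Multi-register variant: each of the numRegs sub-registers encodes its own index, and the phase
 * is the sum of every register's polynomial terms (with {coeffs} and {exponents} flattened across
 * registers). Overrides match on the full tuple of register indices. Since device code cannot
 * allocate a variable-length local array, each thread keeps its register indices in the global
 * scratch array {phaseInds}, strided by the total number of threads.
 */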
__global__ void statevec_applyMultiVarPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int* numTermsPerReg,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
long long int *phaseInds,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
/*
* each thread needs to write to a local:
* long long int phaseInds[numRegs];
* but instead has access to shared array phaseInds, with below stride and offset
*/
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine phase indices
int flatInd = 0;
if (encoding == UNSIGNED) {
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
}
}
else if (encoding == TWOS_COMPLEMENT) {
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]-1; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
// use final qubit to indicate sign
if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
}
}
    // determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++) {
int found = 1;
for (int r=0; r<numRegs; r++) {
if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
found = 0;
break;
}
}
if (found)
break;
}
    // compute the phase (unless overridden)
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else {
flatInd = 0;
for (int r=0; r<numRegs; r++) {
for (int t=0; t<numTermsPerReg[r]; t++) {
phase += coeffs[flatInd] * pow(phaseInds[r*stride+offset], exponents[flatInd]);
flatInd++;
}
}
}
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyMultiVarPhaseFuncOverrides(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int* numTermsPerReg,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// determine size of arrays, for cloning into GPU memory
size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
size_t mem_numTermsPerReg = numRegs * sizeof *numTermsPerReg;
size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;
size_t mem_qubits = 0;
size_t mem_coeffs = 0;
size_t mem_exponents = 0;
for (int r=0; r<numRegs; r++) {
mem_qubits += numQubitsPerReg[r] * sizeof *qubits;
mem_coeffs += numTermsPerReg[r] * sizeof *coeffs;
mem_exponents += numTermsPerReg[r] * sizeof *exponents;
}
// allocate global GPU memory
int* d_qubits; hipMalloc(&d_qubits, mem_qubits);
qreal* d_coeffs; hipMalloc(&d_coeffs, mem_coeffs);
qreal* d_exponents; hipMalloc(&d_exponents, mem_exponents);
int* d_numQubitsPerReg; hipMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
int* d_numTermsPerReg; hipMalloc(&d_numTermsPerReg, mem_numTermsPerReg);
long long int* d_overrideInds; hipMalloc(&d_overrideInds, mem_overrideInds);
qreal* d_overridePhases; hipMalloc(&d_overridePhases, mem_overridePhases);
// copy function args into GPU memory
hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice);
hipMemcpy(d_coeffs, coeffs, mem_coeffs, hipMemcpyHostToDevice);
hipMemcpy(d_exponents, exponents, mem_exponents, hipMemcpyHostToDevice);
hipMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, hipMemcpyHostToDevice);
hipMemcpy(d_numTermsPerReg, numTermsPerReg, mem_numTermsPerReg, hipMemcpyHostToDevice);
hipMemcpy(d_overrideInds, overrideInds, mem_overrideInds, hipMemcpyHostToDevice);
hipMemcpy(d_overridePhases, overridePhases, mem_overridePhases, hipMemcpyHostToDevice);
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
// allocate thread-local working space {phaseInds}
long long int *d_phaseInds;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
hipMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);
// call kernel
hipLaunchKernelGGL(( statevec_applyMultiVarPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0,
qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
d_coeffs, d_exponents, d_numTermsPerReg,
d_overrideInds, d_overridePhases, numOverrides,
d_phaseInds,
conj);
// free device memory
hipFree(d_qubits);
hipFree(d_coeffs);
hipFree(d_exponents);
hipFree(d_numQubitsPerReg);
hipFree(d_numTermsPerReg);
hipFree(d_overrideInds);
hipFree(d_overridePhases);
hipFree(d_phaseInds);
}
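/** Named-function variant: the phase is a built-in function of the register indices (the norm,
 * product or pairwise-distance families), optionally scaled, inverted and/or shifted, with
 * {params} supplying the scale factor, the shifts, and the value to substitute where the
 * function diverges (norm, product or distance equal to zero).
 */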
__global__ void statevec_applyParamNamedPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
enum phaseFunc phaseFuncName, qreal* params, int numParams,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
long long int* phaseInds,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
/*
* each thread needs to write to a local:
* long long int phaseInds[numRegs];
* but instead has access to shared array phaseInds, with below stride and offset
*/
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine phase indices
if (encoding == UNSIGNED) {
int flatInd = 0;
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
}
}
else if (encoding == TWOS_COMPLEMENT) {
int flatInd = 0;
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]-1; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
// use final qubit to indicate sign
if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
}
}
    // determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++) {
int found = 1;
for (int r=0; r<numRegs; r++) {
if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
found = 0;
break;
}
}
if (found)
break;
}
    // compute the phase (unless overridden)
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else {
// compute norm related phases
if (phaseFuncName == NORM || phaseFuncName == INVERSE_NORM ||
phaseFuncName == SCALED_NORM || phaseFuncName == SCALED_INVERSE_NORM ||
phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
qreal norm = 0;
if (phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
for (int r=0; r<numRegs; r++) {
qreal dif = phaseInds[r*stride+offset] - params[2+r];
norm += dif*dif;
}
}
else
for (int r=0; r<numRegs; r++)
norm += phaseInds[r*stride+offset]*phaseInds[r*stride+offset];
norm = sqrt(norm);
if (phaseFuncName == NORM)
phase = norm;
else if (phaseFuncName == INVERSE_NORM)
phase = (norm == 0.)? params[0] : 1/norm;
else if (phaseFuncName == SCALED_NORM)
phase = params[0] * norm;
else if (phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM)
phase = (norm == 0.)? params[1] : params[0] / norm;
}
// compute product related phases
else if (phaseFuncName == PRODUCT || phaseFuncName == INVERSE_PRODUCT ||
phaseFuncName == SCALED_PRODUCT || phaseFuncName == SCALED_INVERSE_PRODUCT) {
qreal prod = 1;
for (int r=0; r<numRegs; r++)
prod *= phaseInds[r*stride+offset];
if (phaseFuncName == PRODUCT)
phase = prod;
else if (phaseFuncName == INVERSE_PRODUCT)
phase = (prod == 0.)? params[0] : 1/prod;
else if (phaseFuncName == SCALED_PRODUCT)
phase = params[0] * prod;
else if (phaseFuncName == SCALED_INVERSE_PRODUCT)
phase = (prod == 0.)? params[1] : params[0] / prod;
}
// compute Euclidean distance related phases
else if (phaseFuncName == DISTANCE || phaseFuncName == INVERSE_DISTANCE ||
phaseFuncName == SCALED_DISTANCE || phaseFuncName == SCALED_INVERSE_DISTANCE ||
phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {
qreal dist = 0;
if (phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {
for (int r=0; r<numRegs; r+=2) {
qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset] - params[2+r/2]);
dist += dif*dif;
}
}
else
for (int r=0; r<numRegs; r+=2) {
qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset]);
dist += dif*dif;
}
dist = sqrt(dist);
if (phaseFuncName == DISTANCE)
phase = dist;
else if (phaseFuncName == INVERSE_DISTANCE)
phase = (dist == 0.)? params[0] : 1/dist;
else if (phaseFuncName == SCALED_DISTANCE)
phase = params[0] * dist;
else if (phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE)
phase = (dist == 0.)? params[1] : params[0] / dist;
}
}
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyParamNamedPhaseFuncOverrides(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
enum phaseFunc phaseFuncName, qreal* params, int numParams,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// determine size of arrays, for cloning into GPU memory
size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;
size_t mem_params = numParams * sizeof *params;
size_t mem_qubits = 0;
for (int r=0; r<numRegs; r++)
mem_qubits += numQubitsPerReg[r] * sizeof *qubits;
// allocate global GPU memory
int* d_qubits; hipMalloc(&d_qubits, mem_qubits);
int* d_numQubitsPerReg; hipMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
long long int* d_overrideInds; hipMalloc(&d_overrideInds, mem_overrideInds);
qreal* d_overridePhases; hipMalloc(&d_overridePhases, mem_overridePhases);
qreal* d_params = NULL; if (numParams > 0) hipMalloc(&d_params, mem_params);
// copy function args into GPU memory
hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice);
hipMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, hipMemcpyHostToDevice);
hipMemcpy(d_overrideInds, overrideInds, mem_overrideInds, hipMemcpyHostToDevice);
hipMemcpy(d_overridePhases, overridePhases, mem_overridePhases, hipMemcpyHostToDevice);
if (numParams > 0)
hipMemcpy(d_params, params, mem_params, hipMemcpyHostToDevice);
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
// allocate thread-local working space {phaseInds}
long long int *d_phaseInds;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
hipMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);
// call kernel
hipLaunchKernelGGL(( statevec_applyParamNamedPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0,
qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
phaseFuncName, d_params, numParams,
d_overrideInds, d_overridePhases, numOverrides,
d_phaseInds,
conj);
// free device memory
hipFree(d_qubits);
hipFree(d_numQubitsPerReg);
hipFree(d_overrideInds);
hipFree(d_overridePhases);
hipFree(d_phaseInds);
if (numParams > 0)
hipFree(d_params);
}
void seedQuESTDefault(){
    // init MT random number generator with two keys -- time and pid
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[2];
getQuESTDefaultSeedKey(key);
init_by_array(key, 2);
}
#ifdef __cplusplus
}
#endif
| c7251286abb628b308cb1ab44419b902b54ff6ae.cu | // Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*
* @author Ania Brown
* @author Tyson Jones
*/
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
/*
* struct types for concisely passing unitaries to kernels
*/
// hide these from doxygen
/// \cond HIDDEN_SYMBOLS
typedef struct ArgMatrix2 {
Complex r0c0, r0c1;
Complex r1c0, r1c1;
} ArgMatrix2;
typedef struct ArgMatrix4
{
Complex r0c0, r0c1, r0c2, r0c3;
Complex r1c0, r1c1, r1c2, r1c3;
Complex r2c0, r2c1, r2c2, r2c3;
Complex r3c0, r3c1, r3c2, r3c3;
} ArgMatrix4;
ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) {
ArgMatrix2 a;
a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0];
a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1];
a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0];
a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1];
return a;
}
ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) {
ArgMatrix4 a;
a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0];
a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1];
a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2];
a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3];
a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0];
a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1];
a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2];
a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3];
a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0];
a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1];
a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2];
a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3];
a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0];
a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1];
a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2];
a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3];
return a;
}
/// \endcond
/*
* in-kernel bit twiddling functions
*/
__forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) {
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
__forceinline__ __device__ int getBitMaskParity(long long int mask) {
int parity = 0;
while (mask) {
parity = !parity;
mask = mask & (mask-1);
}
return parity;
}
__forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) {
return (number ^ (1LL << bitInd));
}
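/** returns 'number' with a zero bit inserted at position 'index', shifting all higher bits up by one */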
__forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) {
long long int left, right;
left = (number >> index) << index;
right = number - left;
return (left << 1) ^ right;
}
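/** inserts zero bits at positions 'bit1' and 'bit2' by applying insertZeroBit twice, smaller index first */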
__forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) {
int small = (bit1 < bit2)? bit1 : bit2;
int big = (bit1 < bit2)? bit2 : bit1;
return insertZeroBit(insertZeroBit(number, small), big);
}
__forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) {
/* inserted bit inds must strictly increase, so that their final indices are correct.
* in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already
     * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index
     * at each insert. Recall every element of inds (a positive or zero number) is unique.
* This function won't appear in the CPU code, which can use C99 variable-size arrays and
* ought to make a sorted array before threading
*/
int curMin = inds[0];
int prevMin = -1;
for (int n=0; n < numInds; n++) {
// find next min
for (int t=0; t < numInds; t++)
if (inds[t]>prevMin && inds[t]<curMin)
curMin = inds[t];
number = insertZeroBit(number, curMin);
// set curMin to an arbitrary non-visited elem
prevMin = curMin;
for (int t=0; t < numInds; t++)
if (inds[t] > curMin) {
curMin = inds[t];
break;
}
}
return number;
}
/*
* state vector and density matrix operations
*/
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
cudaDeviceSynchronize();
cudaMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
cudaMemcpyHostToDevice);
cudaMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.imag)),
cudaMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
cudaDeviceSynchronize();
cudaMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
cudaMemcpyDeviceToDevice);
cudaMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
cudaMemcpyDeviceToDevice);
}
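/** Forms the outer product |copy><copy|: each thread takes one index of the pure state and writes
 * every element copy[index] * conj(copy[col]) of the density matrix at flat index col*numPureAmps + index.
 */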
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
long long int numAmps = 1L << numQubits;
long long int numAmpsPerRank = numAmps/env.numRanks;
    qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
if (env.numRanks>1){
        qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
cudaFree(qureg.deviceStateVec.real);
cudaFree(qureg.deviceStateVec.imag);
cudaFree(qureg.firstLevelReduction);
cudaFree(qureg.secondLevelReduction);
}
DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) {
DiagonalOp op;
op.numQubits = numQubits;
op.numElemsPerChunk = (1LL << numQubits) / env.numRanks;
op.chunkId = env.rank;
op.numChunks = env.numRanks;
// allocate CPU memory (initialised to zero)
op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
// @TODO no handling of rank>1 allocation (no distributed GPU)
// check cpu memory allocation was successful
if ( !op.real || !op.imag ) {
printf("Could not allocate memory!\n");
exit(EXIT_FAILURE);
}
// allocate GPU memory
size_t arrSize = op.numElemsPerChunk * sizeof(qreal);
cudaMalloc(&(op.deviceOperator.real), arrSize);
cudaMalloc(&(op.deviceOperator.imag), arrSize);
// check gpu memory allocation was successful
if (!op.deviceOperator.real || !op.deviceOperator.imag) {
printf("Could not allocate memory on GPU!\n");
exit(EXIT_FAILURE);
}
// initialise GPU memory to zero
cudaMemset(op.deviceOperator.real, 0, arrSize);
cudaMemset(op.deviceOperator.imag, 0, arrSize);
return op;
}
void agnostic_destroyDiagonalOp(DiagonalOp op) {
free(op.real);
free(op.imag);
cudaFree(op.deviceOperator.real);
cudaFree(op.deviceOperator.imag);
}
void agnostic_syncDiagonalOp(DiagonalOp op) {
cudaDeviceSynchronize();
size_t mem_elems = op.numElemsPerChunk * sizeof *op.real;
cudaMemcpy(op.deviceOperator.real, op.real, mem_elems, cudaMemcpyHostToDevice);
cudaMemcpy(op.deviceOperator.imag, op.imag, mem_elems, cudaMemcpyHostToDevice);
}
__global__ void agnostic_initDiagonalOpFromPauliHamilKernel(
DiagonalOp op, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms
) {
// each thread processes one diagonal element
long long int elemInd = blockIdx.x*blockDim.x + threadIdx.x;
if (elemInd >= op.numElemsPerChunk)
return;
qreal elem = 0;
// elem is (+-) every coefficient, with sign determined by parity
for (int t=0; t<numSumTerms; t++) {
// determine the parity of the Z-targeted qubits in the element's corresponding state
int isOddNumOnes = 0;
for (int q=0; q<op.numQubits; q++)
if (pauliCodes[q + t*op.numQubits] == PAULI_Z)
if (extractBit(q, elemInd))
isOddNumOnes = !isOddNumOnes;
// avoid warp divergence
int sign = 1 - 2*isOddNumOnes; // (-1 if isOddNumOnes, else +1)
elem += termCoeffs[t] * sign;
}
op.deviceOperator.real[elemInd] = elem;
op.deviceOperator.imag[elemInd] = 0;
}
void agnostic_initDiagonalOpFromPauliHamil(DiagonalOp op, PauliHamil hamil) {
    // copy args into GPU memory
enum pauliOpType* d_pauliCodes;
size_t mem_pauliCodes = hamil.numSumTerms * op.numQubits * sizeof *d_pauliCodes;
cudaMalloc(&d_pauliCodes, mem_pauliCodes);
cudaMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, cudaMemcpyHostToDevice);
qreal* d_termCoeffs;
size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs;
cudaMalloc(&d_termCoeffs, mem_termCoeffs);
cudaMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, cudaMemcpyHostToDevice);
int numThreadsPerBlock = 128;
int numBlocks = ceil(op.numElemsPerChunk / (qreal) numThreadsPerBlock);
agnostic_initDiagonalOpFromPauliHamilKernel<<<numBlocks, numThreadsPerBlock>>>(
op, d_pauliCodes, d_termCoeffs, hamil.numSumTerms);
    // copy populated operator into RAM
cudaDeviceSynchronize();
size_t mem_elems = op.numElemsPerChunk * sizeof *op.real;
cudaMemcpy(op.real, op.deviceOperator.real, mem_elems, cudaMemcpyDeviceToHost);
cudaMemcpy(op.imag, op.deviceOperator.imag, mem_elems, cudaMemcpyDeviceToHost);
cudaFree(d_pauliCodes);
cudaFree(d_termCoeffs);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct cudaDeviceProp properties;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
cudaDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, char str[200]){
// OpenMP can be hybridised with GPU in future, so this check is safe and worthwhile
int ompStatus=0;
int numThreads=1;
# ifdef _OPENMP
ompStatus=1;
numThreads=omp_get_max_threads();
# endif
// there is no reporting of CUDA cores/threads/blocks currently (since non-trivial)
sprintf(str, "CUDA=1 OpenMP=%d MPI=0 threads=%d ranks=1", ompStatus, numThreads);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice);
cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
cudaDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the statevector to be all-zeros
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
void statevec_initBlankState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initBlankStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initDebugState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initDebugStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
                sscanf(line, "%Lf, %Lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
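/** Applies the compact unitary {{alpha, -conj(beta)}, {beta, conj(alpha)}} to every pair of
 * amplitudes whose indices differ only in the rotQubit bit.
 */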
__global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta);
}
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, argifyMatrix2(u));
}
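/** Each thread gathers the 2^numTargs amplitudes reached by enumerating every assignment of the
 * target qubits (skipping states whose control qubits are not all 1), caches their indices and
 * values in the global scratch arrays (strided by the total thread count), then overwrites them
 * with the matrix-vector product of the flattened unitary {uRe, uIm} and the cached amplitudes.
 */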
__global__ void statevec_multiControlledMultiQubitUnitaryKernel(
Qureg qureg, long long int ctrlMask, int* targs, int numTargs,
qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps)
{
// decide the amplitudes this thread will modify
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes
if (thisTask>=numTasks) return;
// find this task's start index (where all targs are 0)
long long int ind00 = insertZeroBits(thisTask, targs, numTargs);
// this task only modifies amplitudes if control qubits are 1 for this state
if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
return;
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
/*
each thread needs:
long long int ampInds[numAmps];
qreal reAmps[numAmps];
qreal imAmps[numAmps];
but instead has access to shared arrays, with below stride and offset
*/
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine the indices and record values of target amps
long long int ind;
for (int i=0; i < numTargAmps; i++) {
// get global index of current target qubit assignment
ind = ind00;
for (int t=0; t < numTargs; t++)
if (extractBit(t, i))
ind = flipBit(ind, targs[t]);
ampInds[i*stride+offset] = ind;
reAmps [i*stride+offset] = reVec[ind];
imAmps [i*stride+offset] = imVec[ind];
}
// update the amplitudes
for (int r=0; r < numTargAmps; r++) {
ind = ampInds[r*stride+offset];
reVec[ind] = 0;
imVec[ind] = 0;
for (int c=0; c < numTargAmps; c++) {
qreal uReElem = uRe[c + r*numTargAmps];
qreal uImElem = uIm[c + r*numTargAmps];
reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem;
imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem;
}
}
}
void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u)
{
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock);
// allocate device space for global {targs} (length: numTargs) and populate
int *d_targs;
size_t targMemSize = numTargs * sizeof *d_targs;
cudaMalloc(&d_targs, targMemSize);
cudaMemcpy(d_targs, targs, targMemSize, cudaMemcpyHostToDevice);
// flatten out the u.real and u.imag lists
int uNumRows = (1 << u.numQubits);
qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat);
qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat);
long long int i = 0;
for (int r=0; r < uNumRows; r++)
for (int c=0; c < uNumRows; c++) {
uReFlat[i] = u.real[r][c];
uImFlat[i] = u.imag[r][c];
i++;
}
// allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate
qreal* d_uRe;
qreal* d_uIm;
size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm
cudaMalloc(&d_uRe, uMemSize);
cudaMalloc(&d_uIm, uMemSize);
cudaMemcpy(d_uRe, uReFlat, uMemSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_uIm, uImFlat, uMemSize, cudaMemcpyHostToDevice);
// allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs)
long long int *d_ampInds;
qreal *d_reAmps;
qreal *d_imAmps;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
int numTargAmps = uNumRows;
cudaMalloc(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds);
cudaMalloc(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps);
cudaMalloc(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps);
// call kernel
statevec_multiControlledMultiQubitUnitaryKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps);
// free kernel memory
free(uReFlat);
free(uImFlat);
cudaFree(d_targs);
cudaFree(d_uRe);
cudaFree(d_uIm);
cudaFree(d_ampInds);
cudaFree(d_reAmps);
cudaFree(d_imAmps);
}
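/** Applies the 4x4 unitary u to every group of four amplitudes spanned by qubits q1 (least
 * significant) and q2, but only for states in which all control qubits in ctrlMask are 1.
 */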
__global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){
// decide the 4 amplitudes this thread will modify
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes
if (thisTask>=numTasks) return;
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
// find indices of amplitudes to modify (treat q1 as the least significant bit)
long long int ind00, ind01, ind10, ind11;
ind00 = insertTwoZeroBits(thisTask, q1, q2);
// modify only if control qubits are 1 for this state
if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
return;
ind01 = flipBit(ind00, q1);
ind10 = flipBit(ind00, q2);
ind11 = flipBit(ind01, q2);
// extract statevec amplitudes
qreal re00, re01, re10, re11;
qreal im00, im01, im10, im11;
re00 = reVec[ind00]; im00 = imVec[ind00];
re01 = reVec[ind01]; im01 = imVec[ind01];
re10 = reVec[ind10]; im10 = imVec[ind10];
re11 = reVec[ind11]; im11 = imVec[ind11];
// apply u * {amp00, amp01, amp10, amp11}
reVec[ind00] =
u.r0c0.real*re00 - u.r0c0.imag*im00 +
u.r0c1.real*re01 - u.r0c1.imag*im01 +
u.r0c2.real*re10 - u.r0c2.imag*im10 +
u.r0c3.real*re11 - u.r0c3.imag*im11;
imVec[ind00] =
u.r0c0.imag*re00 + u.r0c0.real*im00 +
u.r0c1.imag*re01 + u.r0c1.real*im01 +
u.r0c2.imag*re10 + u.r0c2.real*im10 +
u.r0c3.imag*re11 + u.r0c3.real*im11;
reVec[ind01] =
u.r1c0.real*re00 - u.r1c0.imag*im00 +
u.r1c1.real*re01 - u.r1c1.imag*im01 +
u.r1c2.real*re10 - u.r1c2.imag*im10 +
u.r1c3.real*re11 - u.r1c3.imag*im11;
imVec[ind01] =
u.r1c0.imag*re00 + u.r1c0.real*im00 +
u.r1c1.imag*re01 + u.r1c1.real*im01 +
u.r1c2.imag*re10 + u.r1c2.real*im10 +
u.r1c3.imag*re11 + u.r1c3.real*im11;
reVec[ind10] =
u.r2c0.real*re00 - u.r2c0.imag*im00 +
u.r2c1.real*re01 - u.r2c1.imag*im01 +
u.r2c2.real*re10 - u.r2c2.imag*im10 +
u.r2c3.real*re11 - u.r2c3.imag*im11;
imVec[ind10] =
u.r2c0.imag*re00 + u.r2c0.real*im00 +
u.r2c1.imag*re01 + u.r2c1.real*im01 +
u.r2c2.imag*re10 + u.r2c2.real*im10 +
u.r2c3.imag*re11 + u.r2c3.real*im11;
reVec[ind11] =
u.r3c0.real*re00 - u.r3c0.imag*im00 +
u.r3c1.real*re01 - u.r3c1.imag*im01 +
u.r3c2.real*re10 - u.r3c2.imag*im10 +
u.r3c3.real*re11 - u.r3c3.imag*im11;
imVec[ind11] =
u.r3c0.imag*re00 + u.r3c0.real*im00 +
u.r3c1.imag*re01 + u.r3c1.real*im01 +
u.r3c2.imag*re10 + u.r3c2.real*im10 +
u.r3c3.imag*re11 + u.r3c3.real*im11;
}
void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u)
{
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes
statevec_multiControlledTwoQubitUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, ctrlMask, q1, q2, argifyMatrix4(u));
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_multiControlledUnitaryKernel(
Qureg qureg,
long long int ctrlQubitsMask, long long int ctrlFlipMask,
int targetQubit, ArgMatrix2 u
){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) {
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(
Qureg qureg,
long long int ctrlQubitsMask, long long int ctrlFlipMask,
int targetQubit, ComplexMatrix2 u
){
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u));
}
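/* Illustrative note on the control test above: ctrlQubitsMask == (ctrlQubitsMask &
 * (indexUp ^ ctrlFlipMask)) holds only when every control listed in ctrlQubitsMask is
 * 1 after XOR-ing with ctrlFlipMask, i.e. unflipped controls must be 1 and flipped
 * controls must be 0. For example ctrlQubitsMask = 0b101 with ctrlFlipMask = 0b100
 * selects basis states whose qubit 0 is 1 and whose qubit 2 is 0. */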
__global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach: the loop is exposed as small-granularity tasks
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1);
}
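/* Illustrative note: with conjFac = +1 the kernel applies Y = {{0,-i},{i,0}} to the
 * (indexUp, indexLo) pair; with conjFac = -1 (as launched by statevec_pauliYConj) it
 * applies the element-wise conjugate {{0,i},{-i,0}}, which equals -Y. */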
__global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask = getQubitBitMask(controlQubits, numControlQubits);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle);
}
__global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
long long int stateVecSize = qureg.numAmpsPerChunk;
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
int fac = getBitMaskParity(mask & index)? -1 : 1;
qreal stateReal = stateVecReal[index];
qreal stateImag = stateVecImag[index];
stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag;
stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag;
}
void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle)
{
qreal cosAngle = cos(angle/2.0);
qreal sinAngle = sin(angle/2.0);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiRotateZKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle);
}
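/* Illustrative note: with fac = +1 for even parity of (mask & index) and fac = -1 for
 * odd parity, the update above multiplies each amplitude by
 *     cosAngle - i*fac*sinAngle = exp(-i * fac * angle/2),
 * i.e. exp(-i*angle/2) on even-parity basis states and exp(+i*angle/2) on odd-parity
 * ones, which is the action of exp(-i*(angle/2) * Z x ... x Z) on the qubits in mask. */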
__global__ void statevec_multiControlledMultiRotateZKernel(Qureg qureg, long long int ctrlMask, long long int targMask, qreal cosAngle, qreal sinAngle) {
long long int stateVecSize = qureg.numAmpsPerChunk;
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
// amplitudes whose control qubits are not all in the |1> state are unmodified
if (ctrlMask && ((ctrlMask & index) != ctrlMask))
return;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
// avoid warp divergence, setting fac = +- 1
int fac = 1-2*getBitMaskParity(targMask & index);
qreal stateReal = stateVecReal[index];
qreal stateImag = stateVecImag[index];
stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag;
stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag;
}
void statevec_multiControlledMultiRotateZ(Qureg qureg, long long int ctrlMask, long long int targMask, qreal angle)
{
qreal cosAngle = cos(angle/2.0);
qreal sinAngle = sin(angle/2.0);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledMultiRotateZKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, ctrlMask, targMask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
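/* Illustrative note: the density matrix is stored flattened, so diagonal element
 * (col, col) sits at flat index col*(numCols + 1). For numCols = 4 the trace is read
 * from flat indices 0, 5, 10 and 15. */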
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
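/* Illustrative note on the Kahan step above: c holds the low-order error of the previous
 * addition. For instance, adding a term of order 1e-17 to a running total near 1.0 in
 * double precision would be rounded away entirely; the compensation term feeds that lost
 * contribution back into the next iteration, keeping the total accurate even when
 * summing a very large number of amplitudes. */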
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask = getQubitBitMask(controlQubits, numControlQubits);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask);
}
__global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) {
qreal *reVec = qureg.deviceStateVec.real;
qreal *imVec = qureg.deviceStateVec.imag;
long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int ind00, ind01, ind10;
qreal re01, re10, im01, im10;
// determine ind00 of |..0..0..>, |..0..1..> and |..1..0..>
ind00 = insertTwoZeroBits(thisTask, qb1, qb2);
ind01 = flipBit(ind00, qb1);
ind10 = flipBit(ind00, qb2);
// extract statevec amplitudes
re01 = reVec[ind01]; im01 = imVec[ind01];
re10 = reVec[ind10]; im10 = imVec[ind10];
// swap 01 and 10 amps
reVec[ind01] = re10; reVec[ind10] = re01;
imVec[ind01] = im10; imVec[ind10] = im01;
}
void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock);
statevec_swapQubitAmpsKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, qb1, qb2);
}
__global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach: the loop is exposed as small-granularity tasks
long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit);
}
__global__ void statevec_multiControlledMultiQubitNotKernel(Qureg qureg, int ctrlMask, int targMask) {
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
// although each thread swaps/updates two amplitudes, we still invoke one thread per amp
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= qureg.numAmpsPerChunk)
return;
// modify amplitudes only if control qubits are 1 for this state
if (ctrlMask && ((ctrlMask & ampInd) != ctrlMask))
return;
long long int mateInd = ampInd ^ targMask;
// if the mate is lower index, another thread is handling it
if (mateInd < ampInd)
return;
/* it may seem wasteful to spawn more threads than are needed, and abort
* half of them due to the amp pairing above (and potentially abort
* an exponential number due to ctrlMask). however, since we are moving
* global memory directly in a potentially non-contiguous fashion, this
* method is likely to be memory bandwidth bottlenecked anyway
*/
qreal mateRe = stateRe[mateInd];
qreal mateIm = stateIm[mateInd];
// swap amp with mate
stateRe[mateInd] = stateRe[ampInd];
stateIm[mateInd] = stateIm[ampInd];
stateRe[ampInd] = mateRe;
stateIm[ampInd] = mateIm;
}
void statevec_multiControlledMultiQubitNot(Qureg qureg, int ctrlMask, int targMask) {
int numThreadsPerBlock = 128;
int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);
statevec_multiControlledMultiQubitNotKernel<<<numBlocks, numThreadsPerBlock>>>(qureg, ctrlMask, targMask);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
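/* Worked example of reduceBlock: for length = 8, maxDepth = 2 and the loop runs with
 * threadMax = 4, 2, 1. Pass 1 computes arrayIn[0..3] += arrayIn[4..7], pass 2 computes
 * arrayIn[0..1] += arrayIn[2..3], pass 3 leaves the block total in arrayIn[0], which
 * thread 0 writes to reducedArray[blockIdx.x]. */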
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density matrix prob that this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
long long int thisTask; // task-based approach: the loop is exposed as small-granularity tasks
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return zeroProb;
}
qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return stateProb;
}
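/* Illustrative note on the reduction ladder above: if, say, REDUCE_SHARED_SIZE were 512,
 * reducing 2^20 per-amplitude probabilities would take three passes,
 * 2^20 -> 2048 -> 4 -> 1, alternating between firstLevelReduction and
 * secondLevelReduction before the final scalar is copied back to the host. */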
qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
// atomicAdd on doubles isn't available on devices of compute capability < 6.0, so we define it ourselves
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
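/* Illustrative note: the fallback above emulates atomicAdd for doubles with a
 * compare-and-swap loop: read the current bit pattern, compute old + val, and attempt
 * to swap the result in; if another thread updated the address in the meantime the CAS
 * fails and the loop retries with the freshly observed value. */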
__global__ void statevec_calcProbOfAllOutcomesKernel(
qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
// each thread handles one amplitude (all amplitudes are involved)
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= qureg.numAmpsTotal) return;
qreal prob = (
qureg.deviceStateVec.real[ampInd]*qureg.deviceStateVec.real[ampInd] +
qureg.deviceStateVec.imag[ampInd]*qureg.deviceStateVec.imag[ampInd]);
// each amplitude contributes to one outcome
long long int outcomeInd = 0;
for (int q=0; q<numQubits; q++)
outcomeInd += extractBit(qubits[q], ampInd) * (1LL << q);
// each thread atomically writes directly to the global output.
// this beat block-hierarchical atomic reductions in both global and shared memory!
atomicAdd(&outcomeProbs[outcomeInd], prob);
}
void statevec_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {
// copy qubits to GPU memory
int* d_qubits;
size_t mem_qubits = numQubits * sizeof *d_qubits;
cudaMalloc(&d_qubits, mem_qubits);
cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
// create one thread for every amplitude
int numThreadsPerBlock = 128;
int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);
// create global GPU array for outcomeProbs
qreal* d_outcomeProbs;
long long int numOutcomes = (1LL << numQubits);
size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
cudaMalloc(&d_outcomeProbs, mem_outcomeProbs);
cudaMemset(d_outcomeProbs, 0, mem_outcomeProbs);
// populate per-block subarrays
statevec_calcProbOfAllOutcomesKernel<<<numBlocks, numThreadsPerBlock>>>(
d_outcomeProbs, qureg, d_qubits, numQubits);
// copy outcomeProbs from GPU memory
cudaMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, cudaMemcpyDeviceToHost);
// free GPU memory
cudaFree(d_qubits);
cudaFree(d_outcomeProbs);
}
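/* Illustrative note: outcomeInd packs the measured qubits' bits with qubits[0] as the
 * least significant bit. For qubits = {3, 1}, basis index 0b1010 (qubit 3 = 1,
 * qubit 1 = 1) contributes its probability to outcomeProbs[0b11] = outcomeProbs[3],
 * while basis index 0b1000 contributes to outcomeProbs[0b01] = outcomeProbs[1]. */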
__global__ void densmatr_calcProbOfAllOutcomesKernel(
qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
// each thread handles one diagonal amplitude
long long int diagInd = blockIdx.x*blockDim.x + threadIdx.x;
long long int numDiags = (1LL << qureg.numQubitsRepresented);
if (diagInd >= numDiags) return;
long long int flatInd = (1 + numDiags)*diagInd;
qreal prob = qureg.deviceStateVec.real[flatInd]; // im[flatInd] assumed ~ 0
// each diagonal amplitude contributes to one outcome
long long int outcomeInd = 0;
for (int q=0; q<numQubits; q++)
outcomeInd += extractBit(qubits[q], diagInd) * (1LL << q);
// each thread atomically writes directly to the global output.
// this beat block-hierarchical atomic reductions in both global and shared memory!
atomicAdd(&outcomeProbs[outcomeInd], prob);
}
void densmatr_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {
// copy qubits to GPU memory
int* d_qubits;
size_t mem_qubits = numQubits * sizeof *d_qubits;
cudaMalloc(&d_qubits, mem_qubits);
cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
// create global array, with per-block subarrays
int numThreadsPerBlock = 128;
long long int numDiags = (1LL << qureg.numQubitsRepresented);
int numBlocks = ceil(numDiags / (qreal) numThreadsPerBlock);
// create global GPU array for outcomeProbs
qreal* d_outcomeProbs;
long long int numOutcomes = (1LL << numQubits);
size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
cudaMalloc(&d_outcomeProbs, mem_outcomeProbs);
cudaMemset(d_outcomeProbs, 0, mem_outcomeProbs);
// populate per-block subarrays
densmatr_calcProbOfAllOutcomesKernel<<<numBlocks, numThreadsPerBlock>>>(
d_outcomeProbs, qureg, d_qubits, numQubits);
// copy outcomeProbs from GPU memory
cudaMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, cudaMemcpyDeviceToHost);
// free GPU memory
cudaFree(d_qubits);
cudaFree(d_outcomeProbs);
}
/** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */
__global__ void densmatr_calcInnerProductKernel(
Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm
qreal prod = (
a.deviceStateVec.real[index]*b.deviceStateVec.real[index]
+ a.deviceStateVec.imag[index]*b.deviceStateVec.imag[index]);
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = prod;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
qreal densmatr_calcInnerProduct(Qureg a, Qureg b) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = a.numAmpsTotal;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the terms in each block
// arbitrarily store the reduction in the b qureg's array
if (firstTime) {
densmatr_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
a, b, a.numAmpsTotal, b.firstLevelReduction);
firstTime = 0;
}
// sum the block terms
else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
b.firstLevelReduction,
b.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal innerprod;
cudaMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return innerprod;
}
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @todo could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
// we're summing the square of every term in the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return fidelity;
}
__global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel(
qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm,
long long int numAmpsToSum, qreal *reducedArray
) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
// compute this thread's sum term
qreal difRe = aRe[index] - bRe[index];
qreal difIm = aIm[index] - bIm[index];
qreal term = difRe*difRe + difIm*difIm;
// array of each thread's collected term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** computes sqrt(Tr( (a-b) conjTrans(a-b) )) = sqrt( sum of squared abs vals of (a-b) ) */
qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) {
// we're summing the square of every term in (a-b)
long long int numValuesToReduce = a.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block (store reduction temp values in a's reduction array)
if (firstTime) {
densmatr_calcHilbertSchmidtDistanceSquaredKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
a.deviceStateVec.real, a.deviceStateVec.imag,
b.deviceStateVec.real, b.deviceStateVec.imag,
numValuesToReduce, a.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
a.firstLevelReduction,
a.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal trace;
cudaMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
qreal sqrtTrace = sqrt(trace);
return sqrtTrace;
}
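/* Illustrative note: each kernel term above is |a_ij - b_ij|^2, so the reduced sum is
 * Tr( (a-b)(a-b)^dagger ) and the returned value is the Hilbert-Schmidt (Frobenius)
 * distance sqrt( sum_ij |a_ij - b_ij|^2 ). */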
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return traceDensSquared;
}
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
long long int thisTask; // task-based approach: the loop is exposed as small-granularity tasks
// (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb);
}
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << measureQubit;
long long int rowBit = 1LL << rowQubit;
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
__global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_mixDephasingKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) {
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
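/* Worked example of the bit-scatter (scanInd&part1) + ((scanInd&part2)<<1) +
 * ((scanInd&part3)<<2): it re-expands scanInd into a flat density-matrix index with a 0
 * inserted at the colBit position and another 0 at the rowBit position. E.g. for
 * targetQubit = 0 in a 2-qubit density matrix (colBit = 1, rowBit = 4), scanInd =
 * 0,1,2,3 maps to ampInd = 0, 2, 8, 10; adding colBit or rowBit then reaches the
 * off-diagonal |..1..><..0..| and |..0..><..1..| elements scaled by the kernel. */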
void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) {
if (dephase == 0)
return;
qreal dephFac = 1 - dephase;
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac);
}
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* and so on up to |..1..1..><..1..0..|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_mixTwoQubitDephasingKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A..|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixTwoQubitDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_mixDepolarisingKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
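// baseInd addresses the |..0..><..0..| diagonal element and targetInd the matching
// |..1..><..1..| element; depolarising pulls both toward their average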
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
/** Works like mixDepolarisingKernel's index pairing, but instead of averaging the pair, it
 * transfers a fraction (damping) of the |..1..><..1..| element into the |..0..><..0..| element */
__global__ void densmatr_mixDampingKernel(
qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
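// the caller already scaled the off-diagonals via sqrt(1 - damping); here only the diagonal
// population transfer |..1..><..1..| -> |..0..><..0..| remains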
qreal realAvDepol = damping * ( vecReal[targetInd]);
qreal imagAvDepol = damping * ( vecImag[targetInd]);
vecReal[targetInd] *= 1 - damping;
vecImag[targetInd] *= 1 - damping;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
}
void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_mixDephasing(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) {
if (damping == 0)
return;
qreal dephase = sqrt(1-damping);
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixDampingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
/** Called once for every 16 amplitudes */
__global__ void densmatr_mixTwoQubitDepolarisingKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
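// ind00..ind11 are the four diagonal elements sharing the same untargeted bits;
// two-qubit depolarising mixes each of them toward their average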
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_mixTwoQubitDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
__global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
if (ampInd >= numAmpsToVisit) return;
qreal *vecRe1 = qureg1.deviceStateVec.real;
qreal *vecIm1 = qureg1.deviceStateVec.imag;
qreal *vecRe2 = qureg2.deviceStateVec.real;
qreal *vecIm2 = qureg2.deviceStateVec.imag;
qreal *vecReOut = out.deviceStateVec.real;
qreal *vecImOut = out.deviceStateVec.imag;
qreal facRe1 = fac1.real;
qreal facIm1 = fac1.imag;
qreal facRe2 = fac2.real;
qreal facIm2 = fac2.imag;
qreal facReOut = facOut.real;
qreal facImOut = facOut.imag;
qreal re1,im1, re2,im2, reOut,imOut;
long long int index = ampInd;
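// out[i] = facOut*out[i] + fac1*qureg1[i] + fac2*qureg2[i], expanded into real/imaginary parts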
re1 = vecRe1[index]; im1 = vecIm1[index];
re2 = vecRe2[index]; im2 = vecIm2[index];
reOut = vecReOut[index];
imOut = vecImOut[index];
vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2);
vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2);
}
void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
statevec_setWeightedQuregKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
fac1, qureg1, fac2, qureg2, facOut, out
);
}
__global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
// each thread modifies one value; a wasteful and inefficient strategy
long long int numTasks = qureg.numAmpsPerChunk;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask >= numTasks) return;
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
qreal* opRe = op.deviceOperator.real;
qreal* opIm = op.deviceOperator.imag;
qreal a = stateRe[thisTask];
qreal b = stateIm[thisTask];
qreal c = opRe[thisTask];
qreal d = opIm[thisTask];
// (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
stateRe[thisTask] = a*c - b*d;
stateIm[thisTask] = a*d + b*c;
}
void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op);
}
__global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
// each thread modifies one value; a wasteful and inefficient strategy
long long int numTasks = qureg.numAmpsPerChunk;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask >= numTasks) return;
qreal* stateRe = qureg.deviceStateVec.real;
qreal* stateIm = qureg.deviceStateVec.imag;
qreal* opRe = op.deviceOperator.real;
qreal* opIm = op.deviceOperator.imag;
int opDim = (1 << op.numQubits);
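// the diagonal operator has opDim = 2^numQubits elements and repeats across the flattened
// density matrix, so each amplitude is multiplied by the element picked by its low index bits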
qreal a = stateRe[thisTask];
qreal b = stateIm[thisTask];
qreal c = opRe[thisTask % opDim];
qreal d = opIm[thisTask % opDim];
// (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
stateRe[thisTask] = a*c - b*d;
stateIm[thisTask] = a*d + b*c;
}
void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) {
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op);
}
/** computes either a real or imag term of |vec_i|^2 op_i */
__global__ void statevec_calcExpecDiagonalOpKernel(
int getRealComp,
qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// choose whether to calculate the real or imaginary term of the expec term
qreal expecVal;
if (getRealComp)
expecVal = vecAbs * opReal[index];
else
expecVal = vecAbs * opImag[index];
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = expecVal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
/* @TODO: remove all this reduction boilerplate from QuEST GPU
* (e.g. a func which accepts a pointer to do every-value reduction?)
*/
qreal expecReal, expecImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
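// multi-level reduction: each pass sums blocks of up to maxReducedPerLevel values in shared
// memory, ping-ponging between the two reduction buffers until a single value remains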
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// return complex
Complex expecVal;
expecVal.real = expecReal;
expecVal.imag = expecImag;
return expecVal;
}
__global__ void densmatr_calcExpecDiagonalOpKernel(
int getRealComp,
qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag,
int numQubits, long long int numTermsToSum, qreal* reducedArray)
{
/** if the thread represents a diagonal element of the density matrix, then it computes either a
* real or imag term of matr_{ii} op_i. Otherwise, it writes a 0 to the
* reduction array
*/
// index will identify one of the 2^Q diagonal elements to be summed
long long int matInd = blockIdx.x*blockDim.x + threadIdx.x;
if (matInd >= numTermsToSum) return;
long long int diagSpacing = (1LL << numQubits) + 1LL;
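// diagonal entries of the flattened 2^Q x 2^Q density matrix are spaced 2^Q + 1 apart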
int isDiag = ((matInd % diagSpacing) == 0);
long long int opInd = matInd / diagSpacing;
qreal val = 0;
if (isDiag) {
qreal matRe = matReal[matInd];
qreal matIm = matImag[matInd];
qreal opRe = opReal[opInd];
qreal opIm = opImag[opInd];
// (matRe + matIm i)(opRe + opIm i) =
// (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe)
if (getRealComp)
val = matRe * opRe - matIm * opIm;
else
val = matRe * opIm + matIm * opRe;
}
// array of each thread's collected sum term, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = val;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
/* @TODO: remove all this reduction boilerplate from QuEST GPU
* (e.g. a func which accepts a pointer to do every-value reduction?)
*/
qreal expecReal, expecImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
op.numQubits, numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = qureg.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
op.deviceOperator.real, op.deviceOperator.imag,
op.numQubits, numValuesToReduce,
qureg.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// return complex
Complex expecVal;
expecVal.real = expecReal;
expecVal.imag = expecImag;
return expecVal;
}
void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) {
// update both RAM and VRAM, for consistency
memcpy(&op.real[startInd], real, numElems * sizeof(qreal));
memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal));
cudaDeviceSynchronize();
cudaMemcpy(
op.deviceOperator.real + startInd,
real,
numElems * sizeof(*(op.deviceOperator.real)),
cudaMemcpyHostToDevice);
cudaMemcpy(
op.deviceOperator.imag + startInd,
imag,
numElems * sizeof(*(op.deviceOperator.imag)),
cudaMemcpyHostToDevice);
}
__global__ void statevec_applyPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int numTerms,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
// determine phase index of {qubits}
long long int phaseInd = 0LL;
if (encoding == UNSIGNED) {
for (int q=0; q<numQubits; q++)
phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
}
else if (encoding == TWOS_COMPLEMENT) {
for (int q=0; q<numQubits-1; q++) // use final qubit to indicate sign
phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
if (extractBit(qubits[numQubits-1], globalAmpInd) == 1)
phaseInd -= (1LL << (numQubits-1));
}
// determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++)
if (phaseInd == overrideInds[i])
break;
// determine phase from {coeffs}, {exponents} (unless overridden): phase = sum_t coeffs[t] * phaseInd^exponents[t]
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else
for (int t=0; t<numTerms; t++)
phase += coeffs[t] * pow(phaseInd, exponents[t]);
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyPhaseFuncOverrides(
Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int numTerms,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// allocate device space for global list of {qubits}, {coeffs}, {exponents}, {overrideInds} and {overridePhases}
int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits;
qreal* d_coeffs; size_t mem_terms = numTerms * sizeof *d_coeffs;
qreal* d_exponents;
long long int* d_overrideInds; size_t mem_inds = numOverrides * sizeof *d_overrideInds;
qreal* d_overridePhases; size_t mem_phas = numOverrides * sizeof *d_overridePhases;
cudaMalloc(&d_qubits, mem_qubits); cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
cudaMalloc(&d_coeffs, mem_terms); cudaMemcpy(d_coeffs, coeffs, mem_terms, cudaMemcpyHostToDevice);
cudaMalloc(&d_exponents, mem_terms); cudaMemcpy(d_exponents, exponents, mem_terms, cudaMemcpyHostToDevice);
cudaMalloc(&d_overrideInds, mem_inds); cudaMemcpy(d_overrideInds, overrideInds, mem_inds, cudaMemcpyHostToDevice);
cudaMalloc(&d_overridePhases,mem_phas); cudaMemcpy(d_overridePhases, overridePhases, mem_phas, cudaMemcpyHostToDevice);
// call kernel
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
statevec_applyPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
qureg, d_qubits, numQubits, encoding,
d_coeffs, d_exponents, numTerms,
d_overrideInds, d_overridePhases, numOverrides,
conj);
// cleanup device memory
cudaFree(d_qubits);
cudaFree(d_coeffs);
cudaFree(d_exponents);
cudaFree(d_overrideInds);
cudaFree(d_overridePhases);
}
__global__ void statevec_applyMultiVarPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int* numTermsPerReg,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
long long int *phaseInds,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
/*
* each thread needs to write to a local:
* long long int phaseInds[numRegs];
* but instead has access to shared array phaseInds, with below stride and offset
*/
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine phase indices
int flatInd = 0;
if (encoding == UNSIGNED) {
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
}
}
else if (encoding == TWOS_COMPLEMENT) {
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]-1; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
// use final qubit to indicate sign
if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
}
}
// determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++) {
int found = 1;
for (int r=0; r<numRegs; r++) {
if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
found = 0;
break;
}
}
if (found)
break;
}
// compute the phase (unless overridden)
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else {
flatInd = 0;
for (int r=0; r<numRegs; r++) {
for (int t=0; t<numTermsPerReg[r]; t++) {
phase += coeffs[flatInd] * pow(phaseInds[r*stride+offset], exponents[flatInd]);
flatInd++;
}
}
}
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyMultiVarPhaseFuncOverrides(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
qreal* coeffs, qreal* exponents, int* numTermsPerReg,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// determine size of arrays, for cloning into GPU memory
size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
size_t mem_numTermsPerReg = numRegs * sizeof *numTermsPerReg;
size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;
size_t mem_qubits = 0;
size_t mem_coeffs = 0;
size_t mem_exponents = 0;
for (int r=0; r<numRegs; r++) {
mem_qubits += numQubitsPerReg[r] * sizeof *qubits;
mem_coeffs += numTermsPerReg[r] * sizeof *coeffs;
mem_exponents += numTermsPerReg[r] * sizeof *exponents;
}
// allocate global GPU memory
int* d_qubits; cudaMalloc(&d_qubits, mem_qubits);
qreal* d_coeffs; cudaMalloc(&d_coeffs, mem_coeffs);
qreal* d_exponents; cudaMalloc(&d_exponents, mem_exponents);
int* d_numQubitsPerReg; cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
int* d_numTermsPerReg; cudaMalloc(&d_numTermsPerReg, mem_numTermsPerReg);
long long int* d_overrideInds; cudaMalloc(&d_overrideInds, mem_overrideInds);
qreal* d_overridePhases; cudaMalloc(&d_overridePhases, mem_overridePhases);
// copy function args into GPU memory
cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
cudaMemcpy(d_coeffs, coeffs, mem_coeffs, cudaMemcpyHostToDevice);
cudaMemcpy(d_exponents, exponents, mem_exponents, cudaMemcpyHostToDevice);
cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice);
cudaMemcpy(d_numTermsPerReg, numTermsPerReg, mem_numTermsPerReg, cudaMemcpyHostToDevice);
cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice);
cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice);
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
// allocate thread-local working space {phaseInds}
long long int *d_phaseInds;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);
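// layout: phaseInds[r*stride + threadId] with stride = total threads, so consecutive threads
// access consecutive elements for each register r (coalesced)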
// call kernel
statevec_applyMultiVarPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
d_coeffs, d_exponents, d_numTermsPerReg,
d_overrideInds, d_overridePhases, numOverrides,
d_phaseInds,
conj);
// free device memory
cudaFree(d_qubits);
cudaFree(d_coeffs);
cudaFree(d_exponents);
cudaFree(d_numQubitsPerReg);
cudaFree(d_numTermsPerReg);
cudaFree(d_overrideInds);
cudaFree(d_overridePhases);
cudaFree(d_phaseInds);
}
__global__ void statevec_applyParamNamedPhaseFuncOverridesKernel(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
enum phaseFunc phaseFuncName, qreal* params, int numParams,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
long long int* phaseInds,
int conj
) {
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=qureg.numAmpsPerChunk) return;
// determine global amplitude index (non-distributed, so it's just local index)
long long int globalAmpInd = index;
/*
* each thread needs to write to a local:
* long long int phaseInds[numRegs];
* but instead has access to shared array phaseInds, with below stride and offset
*/
size_t stride = gridDim.x*blockDim.x;
size_t offset = blockIdx.x*blockDim.x + threadIdx.x;
// determine phase indices
if (encoding == UNSIGNED) {
int flatInd = 0;
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
}
}
else if (encoding == TWOS_COMPLEMENT) {
int flatInd = 0;
for (int r=0; r<numRegs; r++) {
phaseInds[r*stride+offset] = 0LL;
for (int q=0; q<numQubitsPerReg[r]-1; q++)
phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
// use final qubit to indicate sign
if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
}
}
// determine if this phase index has an overridden value (i < numOverrides)
int i;
for (i=0; i<numOverrides; i++) {
int found = 1;
for (int r=0; r<numRegs; r++) {
if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
found = 0;
break;
}
}
if (found)
break;
}
// compute the phase (unless overridden)
qreal phase = 0;
if (i < numOverrides)
phase = overridePhases[i];
else {
// compute norm related phases
if (phaseFuncName == NORM || phaseFuncName == INVERSE_NORM ||
phaseFuncName == SCALED_NORM || phaseFuncName == SCALED_INVERSE_NORM ||
phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
qreal norm = 0;
if (phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
for (int r=0; r<numRegs; r++) {
qreal dif = phaseInds[r*stride+offset] - params[2+r];
norm += dif*dif;
}
}
else
for (int r=0; r<numRegs; r++)
norm += phaseInds[r*stride+offset]*phaseInds[r*stride+offset];
norm = sqrt(norm);
if (phaseFuncName == NORM)
phase = norm;
else if (phaseFuncName == INVERSE_NORM)
phase = (norm == 0.)? params[0] : 1/norm;
else if (phaseFuncName == SCALED_NORM)
phase = params[0] * norm;
else if (phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM)
phase = (norm == 0.)? params[1] : params[0] / norm;
}
// compute product related phases
else if (phaseFuncName == PRODUCT || phaseFuncName == INVERSE_PRODUCT ||
phaseFuncName == SCALED_PRODUCT || phaseFuncName == SCALED_INVERSE_PRODUCT) {
qreal prod = 1;
for (int r=0; r<numRegs; r++)
prod *= phaseInds[r*stride+offset];
if (phaseFuncName == PRODUCT)
phase = prod;
else if (phaseFuncName == INVERSE_PRODUCT)
phase = (prod == 0.)? params[0] : 1/prod;
else if (phaseFuncName == SCALED_PRODUCT)
phase = params[0] * prod;
else if (phaseFuncName == SCALED_INVERSE_PRODUCT)
phase = (prod == 0.)? params[1] : params[0] / prod;
}
// compute Euclidean distance related phases
else if (phaseFuncName == DISTANCE || phaseFuncName == INVERSE_DISTANCE ||
phaseFuncName == SCALED_DISTANCE || phaseFuncName == SCALED_INVERSE_DISTANCE ||
phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {
qreal dist = 0;
if (phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {
for (int r=0; r<numRegs; r+=2) {
qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset] - params[2+r/2]);
dist += dif*dif;
}
}
else
for (int r=0; r<numRegs; r+=2) {
qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset]);
dist += dif*dif;
}
dist = sqrt(dist);
if (phaseFuncName == DISTANCE)
phase = dist;
else if (phaseFuncName == INVERSE_DISTANCE)
phase = (dist == 0.)? params[0] : 1/dist;
else if (phaseFuncName == SCALED_DISTANCE)
phase = params[0] * dist;
else if (phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE)
phase = (dist == 0.)? params[1] : params[0] / dist;
}
}
// negate phase to conjugate operator
if (conj)
phase *= -1;
// modify amp to amp * exp(i phase)
qreal c = cos(phase);
qreal s = sin(phase);
qreal re = qureg.deviceStateVec.real[index];
qreal im = qureg.deviceStateVec.imag[index];
// = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
qureg.deviceStateVec.real[index] = re*c - im*s;
qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyParamNamedPhaseFuncOverrides(
Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
enum phaseFunc phaseFuncName, qreal* params, int numParams,
long long int* overrideInds, qreal* overridePhases, int numOverrides,
int conj
) {
// determine size of arrays, for cloning into GPU memory
size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;
size_t mem_params = numParams * sizeof *params;
size_t mem_qubits = 0;
for (int r=0; r<numRegs; r++)
mem_qubits += numQubitsPerReg[r] * sizeof *qubits;
// allocate global GPU memory
int* d_qubits; cudaMalloc(&d_qubits, mem_qubits);
int* d_numQubitsPerReg; cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
long long int* d_overrideInds; cudaMalloc(&d_overrideInds, mem_overrideInds);
qreal* d_overridePhases; cudaMalloc(&d_overridePhases, mem_overridePhases);
qreal* d_params = NULL; if (numParams > 0) cudaMalloc(&d_params, mem_params);
// copy function args into GPU memory
cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice);
cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice);
cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice);
if (numParams > 0)
cudaMemcpy(d_params, params, mem_params, cudaMemcpyHostToDevice);
int threadsPerCUDABlock = 128;
int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
// allocate thread-local working space {phaseInds}
long long int *d_phaseInds;
size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);
// call kernel
statevec_applyParamNamedPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
phaseFuncName, d_params, numParams,
d_overrideInds, d_overridePhases, numOverrides,
d_phaseInds,
conj);
// free device memory
cudaFree(d_qubits);
cudaFree(d_numQubitsPerReg);
cudaFree(d_overrideInds);
cudaFree(d_overridePhases);
cudaFree(d_phaseInds);
if (numParams > 0)
cudaFree(d_params);
}
void seedQuESTDefault(){
// init MT random number generator with two keys -- time and pid
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[2];
getQuESTDefaultSeedKey(key);
init_by_array(key, 2);
}
#ifdef __cplusplus
}
#endif
|
919681cadc022c40f9fae5d739675bc6894c368a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#define get_idx() (threadIdx.x)
__global__ void sum(float *x) {
int idx = get_idx();
x[idx] += 1;
}
int main() {
int N = 32;
int nbytes = N * sizeof(float);
float *dx = NULL, *hx = NULL;
/* allocate GPU memory */
hipMalloc((void **)&dx, nbytes);
if (dx == NULL) {
printf("couldn't allocate GPU memory");
return -1;
}
/* allocate CPU memory */
hx = (float*) malloc(nbytes);
//hipMalloc((void **)&hx, nbytes);
if (hx == NULL) {
printf("couldn't allocate CPU memory");
return -2;
}
/* init */
printf("hx original: \n");
for (int i = 0; i < N; i++) {
hx[i] = i;
printf("%g\n", hx[i]);
}
/* copy data to GPU */
hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice);
/* call GPU */
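/* hipify rewrites the CUDA launch sum<<<1, N>>>(dx) as
   hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...) */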
hipLaunchKernelGGL(( sum) , dim3(1), dim3(N), 0, 0, dx);
/* let GPU finish */
hipDeviceSynchronize();
/* copy data from GPU */
hipMemcpy(hx, dx, nbytes, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%g\n", hx[i]);
}
return 0;
}
| 919681cadc022c40f9fae5d739675bc6894c368a.cu | #include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
#define get_idx() (threadIdx.x)
__global__ void sum(float *x) {
int idx = get_idx();
x[idx] += 1;
}
int main() {
int N = 32;
int nbytes = N * sizeof(float);
float *dx = NULL, *hx = NULL;
/* allocate GPU memory */
cudaMalloc((void **)&dx, nbytes);
if (dx == NULL) {
printf("couldn't allocate GPU memory");
return -1;
}
/* allocate CPU memory */
hx = (float*) malloc(nbytes);
//cudaMalloc((void **)&hx, nbytes);
if (hx == NULL) {
printf("couldn't allocate CPU memory");
return -2;
}
/* init */
printf("hx original: \n");
for (int i = 0; i < N; i++) {
hx[i] = i;
printf("%g\n", hx[i]);
}
/* copy data to GPU */
cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
/* call GPU */
sum <<<1, N>>> (dx);
/* let GPU finish */
cudaThreadSynchronize();
/* copy data from GPU */
cudaMemcpy(hx, dx, nbytes, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%g\n", hx[i]);
}
return 0;
}
|
fd7974e228e5a1441f0728b438fe251406cee3bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void make_gpu_busy(int* buf, size_t size, int iterations)
{
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
size_t step = blockDim.x * gridDim.x;
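// grid-stride loop: idx/step let any launch geometry cover the whole buffer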
for (size_t i = idx; i < size; i += step)
{
float f = buf[i];
double f2 = buf[i];
for (int j = 0; j < iterations; j++)
{
if (buf[i] % 2)
buf[i] = buf[i] * 3 + 1;
else
buf[i] /= 2;
// Add more calculations to burn more power
f2 = f2 * 0.5 + buf[i];
f = f * 0.5 + sqrtf(buf[i] + f);
}
buf[i] += (int) f + f2;
}
}
| fd7974e228e5a1441f0728b438fe251406cee3bd.cu | extern "C" __global__ void make_gpu_busy(int* buf, size_t size, int iterations)
{
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
size_t step = blockDim.x * gridDim.x;
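// grid-stride loop: idx/step let any launch geometry cover the whole buffer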
for (size_t i = idx; i < size; i += step)
{
float f = buf[i];
double f2 = buf[i];
for (int j = 0; j < iterations; j++)
{
if (buf[i] % 2)
buf[i] = buf[i] * 3 + 1;
else
buf[i] /= 2;
// Add more calculations to burn more power
f2 = f2 * 0.5 + buf[i];
f = f * 0.5 + sqrtf(buf[i] + f);
}
buf[i] += (int) f + f2;
}
}
|
7a9c5fba561b0bcf65e149a695db47edd5c1aa89.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in BFS kernel running process
* @param[in] avg_duty Average duty of the BFS kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0) {
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference Who-To-Follow implementation (PageRank followed by SALSA-style iterations).
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[out] node_id Pointer to store computed output node ID
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta Delta value for computing PageRank score
* @param[in] alpha Parameter to adjust iteration number
* @param[in] max_iter max iteration to go
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives an incorrect answer. Need to find a CPU PPR implementation
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferencePr(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g,
make_iterator_property_map(ranks.begin(),
get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i) {
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<int> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
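// circle of trust: run the SALSA-style iterations only over the top-1000 page-ranked vertices
// (or all vertices, if the graph is smaller)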
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
int salsa_iter = 1.0/alpha+1;
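// SALSA-style alternation: push rank from hubs to authorities (refscore), then pull it
// back to hubs with an alpha-weighted restart at the source vertex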
for (int iter = 0; iter < salsa_iter; ++iter) {
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (int i = 0; i < cot_size; ++i) {
rank[node_id[i]] = 0;
}
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
Value val = (1-alpha)*refscore[edge]/in_degree[edge];
rank[node] += val;
}
}
for (int i = 0; i < cot_size; ++i) {
if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list = (RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(final_list, final_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run PR tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[in] delta Delta value for computing PageRank, usually set to .85
* @param[in] alpha Parameter to adjust iteration number
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value alpha,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
CudaContext& context)
{
typedef WTFProblem<
VertexId,
SizeT,
Value> Problem;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
// Allocate BFS enactor map
WTFEnactor<INSTRUMENT> wtf_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus), "Problem WTF Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Who-To-Follow");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform BFS
GpuTimer gpu_timer;
util::GRError(csr_problem->Reset(src, delta, alpha, error, wtf_enactor.GetFrontierType()), "pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(wtf_enactor.template Enact<Problem>(context, src, alpha, csr_problem, max_iter, max_grid_size), "pr Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
wtf_enactor.GetStatistics(total_queued, avg_duty);
float elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(csr_problem->Extract(h_rank, h_node_id), "PageRank Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph.nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU PR solution for source-distance
//
if (reference_check != NULL && total_pr > 0)
{
printf("compute ref value\n");
SimpleReferencePr(
graph,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0) {
printf("Validity: ");
CompareResults(h_rank, reference_check, graph.nodes, true);
}
printf("\nGPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph.nodes);
DisplayStats(
*stats,
h_rank,
graph,
elapsed,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check) free(reference_check);
if (h_rank) free(h_rank);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
* @param[in] context CudaContext for moderngpu library
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
Value delta = 0.85f; // Use whatever the specified graph-type's default is
Value alpha = 0.2f;
Value error = 0.01f; // Error threshold
SizeT max_iter = 5;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
VertexId src = -1;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("alpha", alpha);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
args.GetCmdLineArgument("src", src);
//g_quick = args.CheckCmdLineFlag("quick");
g_quick = true;
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented) {
RunTests<VertexId, Value, SizeT, true>(
graph,
src,
delta,
alpha,
error,
max_iter,
max_grid_size,
num_gpus,
context);
} else {
RunTests<VertexId, Value, SizeT, false>(
graph,
src,
delta,
alpha,
error,
max_iter,
max_grid_size,
num_gpus,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1) {
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market") {
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier type
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default value for stream_from_host is false
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
csr.DisplayGraph();
// Run tests
RunTests(csr, args, *context);
} else {
// Unknown graph type
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
| 7a9c5fba561b0bcf65e149a695db47edd5c1aa89.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in BFS kernel running process
* @param[in] avg_duty Average duty of the BFS kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0) {
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference Page Rank implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
 * @param[out] node_id Pointer to store the computed output node IDs
 * @param[out] rank Host-side vector to store CPU-computed rank values for each node
 * @param[in] delta Delta value for computing the PageRank score
 * @param[in] alpha Parameter that adjusts the number of SALSA iterations
 * @param[in] max_iter Maximum number of iterations for the PageRank stage
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives incorrect answer. Need to find a CPU PPR implementation
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferencePr(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g,
make_iterator_property_map(ranks.begin(),
get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i) {
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<int> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
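    // Restrict the SALSA-style refinement below to the highest-ranked vertices
    // (the "circle of trust"), capped at the top 1000 nodes.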
int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
int salsa_iter = 1.0/alpha+1;
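    // Each iteration pushes rank from circle-of-trust nodes onto their neighbors'
    // relevance scores (refscore), then rebuilds rank from those scores, with the
    // teleport mass (alpha) restarting at the source vertex.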
for (int iter = 0; iter < salsa_iter; ++iter) {
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (int i = 0; i < cot_size; ++i) {
rank[node_id[i]] = 0;
}
for (int i = 0; i < cot_size; ++i) {
int node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (int j = graph.row_offsets[node]; j < graph.row_offsets[node+1]; ++j) {
VertexId edge = graph.column_indices[j];
Value val = (1-alpha)*refscore[edge]/in_degree[edge];
rank[node] += val;
}
}
for (int i = 0; i < cot_size; ++i) {
if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list = (RankPair<SizeT, Value>*)malloc(sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(final_list, final_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run PR tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[in] delta Delta value for computing PageRank, usually set to .85
* @param[in] alpha Parameter to adjust iteration number
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for Page Rank computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value alpha,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
CudaContext& context)
{
typedef WTFProblem<
VertexId,
SizeT,
Value> Problem;
    // Allocate host-side rank and node ID arrays (for both reference and GPU-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph.nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
    // Allocate WTF enactor
WTFEnactor<INSTRUMENT> wtf_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus), "Problem WTF Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Who-To-Follow");
long long total_queued = 0;
double avg_duty = 0.0;
    // Perform WTF
GpuTimer gpu_timer;
util::GRError(csr_problem->Reset(src, delta, alpha, error, wtf_enactor.GetFrontierType()), "pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(wtf_enactor.template Enact<Problem>(context, src, alpha, csr_problem, max_iter, max_grid_size), "pr Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
wtf_enactor.GetStatistics(total_queued, avg_duty);
float elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(csr_problem->Extract(h_rank, h_node_id), "PageRank Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph.nodes; ++i)
{
total_pr += h_rank[i];
}
//
    // Compute reference CPU WTF solution for validation
//
if (reference_check != NULL && total_pr > 0)
{
printf("compute ref value\n");
SimpleReferencePr(
graph,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0) {
printf("Validity: ");
CompareResults(h_rank, reference_check, graph.nodes, true);
}
printf("\nGPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph.nodes);
DisplayStats(
*stats,
h_rank,
graph,
elapsed,
total_queued,
avg_duty);
    // Cleanup
    delete stats;
    if (csr_problem) delete csr_problem;
    if (reference_rank) free(reference_rank);  // reference_check aliases reference_rank
    if (h_rank) free(h_rank);
    if (h_node_id) free(h_node_id);
    if (reference_node_id) free(reference_node_id);
    cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
 * @param[in] context CudaContext for the moderngpu library
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
Value delta = 0.85f; // Use whatever the specified graph-type's default is
Value alpha = 0.2f;
Value error = 0.01f; // Error threshold
SizeT max_iter = 5;
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
VertexId src = -1;
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("alpha", alpha);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
args.GetCmdLineArgument("src", src);
//g_quick = args.CheckCmdLineFlag("quick");
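    // The CPU reference cannot compute personalized PageRank correctly (see the
    // TODO above SimpleReferencePr), so validation is skipped unconditionally.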
g_quick = true;
g_verbose = args.CheckCmdLineFlag("v");
if (instrumented) {
RunTests<VertexId, Value, SizeT, true>(
graph,
src,
delta,
alpha,
error,
max_iter,
max_grid_size,
num_gpus,
context);
} else {
RunTests<VertexId, Value, SizeT, false>(
graph,
src,
delta,
alpha,
error,
max_iter,
max_grid_size,
num_gpus,
context);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
Usage();
return 1;
}
//DeviceInit(args);
//cudaSetDeviceFlags(cudaDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
    // Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1) {
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market") {
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier type
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default value for stream_from_host is false
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
csr.DisplayGraph();
// Run tests
RunTests(csr, args, *context);
} else {
// Unknown graph type
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
9d1b7838ee1b5bddc13de9bd1cdbc4a1715aac6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Discrete Cosine Transform(DCT one to four)
* DCT I ---> IV
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_GPU(A, type of DCT, dimensions).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
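/*
 * Usage sketch (MATLAB), assuming the compiled MEX function is named DCT_GPU
 * as in the description above; the input may be a gpuArray or a host matrix:
 *   A = gpuArray(rand(256));        % 256x256 input matrix
 *   B = DCT_GPU(A, 'two', 'row');   % row-wise DCT-II of A
 */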
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "DCT_GPU.cuh"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
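// The two kernels below compute a dense matrix product C = A * B with per-element
// bounds checks; the DCT itself is realized by multiplying the input with a
// precomputed cosine basis matrix (row-wise transform: A * COS, column-wise: COS * A).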
__global__ void DCT_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DCT_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions need not be multiples of DEFAULT_DIM; the kernel bounds-checks every element
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
	// Allocate GPU buffers for the two input matrices and the output matrix.
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCT_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
C = hostC;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs != 3) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidNumInputs",
"three input argument required.");
}
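    // Expected inputs:
    //   prhs[0] - input matrix (gpuArray or standard double matrix)
    //   prhs[1] - DCT type as a string: "one", "two", "three" or "four"
    //   prhs[2] - transform dimension as a string: "row" or "column"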
char row[] = "row";
char column[] = "column";
char one[] = "one";
char two[] = "two";
char three[] = "three";
char four[] = "four";
char *input_buf1;
input_buf1 = mxArrayToString(prhs[1]);
char *input_buf2;
input_buf2 = mxArrayToString(prhs[2]);
if (!(mxIsChar(prhs[1]))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input must be of type string.\n.");
}
if (!(mxIsChar(prhs[2]))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input must be of type string.\n.");
}
// row........................................................................................................
if (strcmp (row,input_buf2) == 0)
{
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
// mexErrMsgIdAndTxt(errId, errMsg);
// }
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n");
return;
}
numDCOSRows=numDCOSColumns=numAColumns;
numCRows = numARows;
numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
// DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
if (numDCOSColumns != 1){
pointer[i + j* numDCOSColumns] = cos((j*PI_d*i / (numDCOSColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
pointer[i + j* numDCOSColumns] =1;
}
}
}
}
// DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * i + 1) / (2.0 * numDCOSColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCT_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numAColumns;
numCRows = numARows;
numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n");
return;
}
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
    // Row-wise Discrete Cosine Transform
//DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
if (numBColumns != 1){
hostB[i + j* numBColumns] = cos((j*PI_d*i / (numBColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
hostB[i + j* numBColumns] =1;
}
}
}
}
//DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
mxFree(input_buf1);
mxFree(input_buf2);
}
// column........................................................................................................
if (strcmp (column,input_buf2) == 0)
{
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
// if ((nrhs!=1)) {
// mexErrMsgIdAndTxt(errId, errMsg);
//}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
// DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
if (numDCOSRows != 1){
pointer[i* numDCOSColumns + j] = cos((j*PI_d*i / (numDCOSRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
}
else{
pointer[i* numDCOSColumns + j] =1;
}
}
}
}
// DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
pointer[i* numDCOSColumns + j] = cos(((2 * i + 1) / (2.0 * numDCOSColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DCT_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n");
return;
}
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
    // Column-wise Discrete Cosine Transform
//DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
if (numBRows != 1){
hostB[i* numBColumns + j] = cos((j*PI_d*i / (numBRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
hostB[i* numBColumns + j] =1;
}
}
}
}
//DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
mxFree(input_buf1);
mxFree(input_buf2);
}
}
| 9d1b7838ee1b5bddc13de9bd1cdbc4a1715aac6e.cu | /*
* Discrete Cosine Transform(DCT one to four)
* DCT I ---> IV
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_GPU(A, type of DCT, dimensions).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
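/*
 * Usage sketch (MATLAB), assuming the compiled MEX function is named DCT_GPU
 * as in the description above; the input may be a gpuArray or a host matrix:
 *   A = gpuArray(rand(256));        % 256x256 input matrix
 *   B = DCT_GPU(A, 'two', 'row');   % row-wise DCT-II of A
 */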
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "DCT_GPU.cuh"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
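// The two kernels below compute a dense matrix product C = A * B with per-element
// bounds checks; the DCT itself is realized by multiplying the input with a
// precomputed cosine basis matrix (row-wise transform: A * COS, column-wise: COS * A).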
__global__ void DCT_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DCT_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions need not be multiples of DEFAULT_DIM; the kernel bounds-checks every element
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
	// Allocate GPU buffers for the two input matrices and the output matrix.
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCT_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
C = hostC;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs != 3) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidNumInputs",
"three input argument required.");
}
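    // Expected inputs:
    //   prhs[0] - input matrix (gpuArray or standard double matrix)
    //   prhs[1] - DCT type as a string: "one", "two", "three" or "four"
    //   prhs[2] - transform dimension as a string: "row" or "column"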
char row[] = "row";
char column[] = "column";
char one[] = "one";
char two[] = "two";
char three[] = "three";
char four[] = "four";
char *input_buf1;
input_buf1 = mxArrayToString(prhs[1]);
char *input_buf2;
input_buf2 = mxArrayToString(prhs[2]);
if (!(mxIsChar(prhs[1]))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input must be of type string.\n.");
}
if (!(mxIsChar(prhs[2]))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input must be of type string.\n.");
}
// row........................................................................................................
if (strcmp (row,input_buf2) == 0)
{
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
// mexErrMsgIdAndTxt(errId, errMsg);
// }
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n");
return;
}
numDCOSRows=numDCOSColumns=numAColumns;
numCRows = numARows;
numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
// DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
if (numDCOSColumns != 1){
pointer[i + j* numDCOSColumns] = cos((j*PI_d*i / (numDCOSColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
pointer[i + j* numDCOSColumns] =1;
}
}
}
}
// DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * i + 1) / (2.0 * numDCOSColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
pointer[i + j* numDCOSColumns] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCT_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numAColumns;
numCRows = numARows;
numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n");
return;
}
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
    // Row-wise Discrete Cosine Transform
//DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
if (numBColumns != 1){
hostB[i + j* numBColumns] = cos((j*PI_d*i / (numBColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
hostB[i + j* numBColumns] =1;
}
}
}
}
//DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
hostB[i + j* numBColumns] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
mxFree(input_buf1);
mxFree(input_buf2);
}
// column........................................................................................................
if (strcmp (column,input_buf2) == 0)
{
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
// if ((nrhs!=1)) {
// mexErrMsgIdAndTxt(errId, errMsg);
//}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
// DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
if (numDCOSRows != 1){
pointer[i* numDCOSColumns + j] = cos((j*PI_d*i / (numDCOSRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
}
else{
pointer[i* numDCOSColumns + j] =1;
}
}
}
}
// DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
pointer[i* numDCOSColumns + j] = cos(((2 * i + 1) / (2.0 * numDCOSColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
// DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
pointer[i* numDCOSColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DCT_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n");
return;
}
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
    // Column-wise Discrete Cosine Transform
//DCT I
if (strcmp (one,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
if (numBRows != 1){
hostB[i* numBColumns + j] = cos((j*PI_d*i / (numBRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
else{
hostB[i* numBColumns + j] =1;
}
}
}
}
//DCT II
if (strcmp (two,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT III
if (strcmp (three,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*PI_d*j)*sqrt(1.0 / (1 + DELTA(1, j + 1)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
}
//DCT IV
if (strcmp (four,input_buf1) == 0){
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
hostB[i* numBColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//hostB[i + j* numBColumns] = 2;
}
}
}
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
mxFree(input_buf1);
mxFree(input_buf2);
}
}
|
a69ebf0ce15e7d9345417837ff2dcd97e2cbdea7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Code For Timing Atomic Operations
This program is used for the previous quiz where Dave will ask you to measure the speed and accuracy of non-atomic and atomic increments. No need to modify the code in any way (although you are welcome to change the code and try different things out).
*/
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 10000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
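	// Non-atomic read-modify-write: concurrent threads may overwrite each
	// other's updates, so the final counts are generally too small.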
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
	// measure speed and accuracy of atomic increments
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
} | a69ebf0ce15e7d9345417837ff2dcd97e2cbdea7.cu | /*Code For Timing Atomic Operations
This program is used for the previous quiz where Dave will ask you to measure the speed and accuracy of non-atomic and atomic increments. No need to modify the code in any way (although you are welcome to change the code and try different things out).
*/
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 10000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
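	// Non-atomic read-modify-write: concurrent threads may overwrite each
	// other's updates, so the final counts are generally too small.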
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic increments
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
} |
980a386995f19fb5418595e43402f2100d7c9cfc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//The __global__ qualifier tells the compiler that this function is called from the CPU and executed on the GPU.
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
//<<<...>>> specifies a call to device code from the host thread.
//A kernel is executed by a set of threads, and every thread runs the same code.
//The parameters enclosed in <<<...>>> specify the number of threads that will execute this kernel.
//In this example, 10 GPU threads will be launched.
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, );
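// For example, a launch configuration of dim3(2), dim3(5) (2 blocks of 5
// threads each) would also print the message 10 times in total.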
hipDeviceReset();
return 0;
} | 980a386995f19fb5418595e43402f2100d7c9cfc.cu | #include <stdio.h>
//The __global__ qualifier tells the compiler that this function is called from the CPU and executed on the GPU.
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
//<<<...>>> specifies a call to device code from the host thread.
//A kernel is executed by a set of threads, and every thread runs the same code.
//The parameters enclosed in <<<...>>> specify the number of threads that will execute this kernel.
//In this example, 10 GPU threads will be launched.
helloFromGPU <<<1, 10>>>();
cudaDeviceReset();
return 0;
} |
cutoff6overlap.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
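/* Size check for the table above: NBRLIST_MAXLEN = 11^3 = 1331 entries times
 * sizeof(int3) = 12 bytes gives 15,972 bytes, which (plus NbrListLen) indeed
 * fits within the 16K mentioned in the comment. */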
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
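/* To profile neighbor counts instead of potentials, replace the #undef above
 * with "#define NEIGHBOR_COUNT": ener_t then becomes int, the kernels
 * accumulate (r2 < cutoff2) hits, and the host prints the total as
 * "Neighbor count". */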
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
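/*
 * Illustrative example of the binZeroAddr shift described above (the numbers
 * are hypothetical, not taken from any particular input): with a cutoff of
 * 12 Angstroms and BIN_LENGTH = 4, c = ceil(12/4) = 3, so binZeroAddr sits
 * (3*binDim_y + 3)*binDim_x + 3 bins past binBaseAddr, and a stencil offset
 * as negative as (-3,-3,-3) from an edge bin still addresses memory inside
 * the padded bin array.
 */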
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
struct pb_TimerSet *timers, /* for measuring execution time */
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (ener_t *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
hipMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
hipMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
hipMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
hipMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
hipMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
hipMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
hipMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
hipStream_t cutoffstream;
hipStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
#ifdef ENABLE_CURD
allocateReadWriteSets(gridDim, blockDim);
#endif
hipLaunchKernelGGL(( cuda_cutoff_potential_lattice6overlap), dim3(gridDim), dim3(blockDim), 0, 0, binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
#ifdef ENABLE_CURD
freeReadWriteSets(gridDim, blockDim);
#endif
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
hipStreamSynchronize(cutoffstream);
CUERR;
hipDeviceSynchronize();
hipStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
hipMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
hipMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
hipFree(regionZeroCuda);
hipFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
| cutoff6overlap.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
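/* Size check for the cache described above: BIN_CACHE_MAXLEN (32) bins times
 * BIN_DEPTH (8) atoms times 4 floats per atom times 4 bytes is 4096 bytes,
 * i.e. the 4K of shared memory quoted in the comment; REGION_SIZE = 8*8*8
 * lattice points per region. */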
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
struct pb_TimerSet *timers, /* for measuring execution time */
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (ener_t *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
cudaMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
cudaMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
cudaMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
cudaMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
cudaMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
cudaMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
cudaMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
cudaStream_t cutoffstream;
cudaStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
#ifdef ENABLE_CURD
allocateReadWriteSets(gridDim, blockDim);
#endif
cuda_cutoff_potential_lattice6overlap<<<gridDim, blockDim, 0>>>(binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
#ifdef ENABLE_CURD
freeReadWriteSets(gridDim, blockDim);
#endif
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
cudaStreamSynchronize(cutoffstream);
CUERR;
cudaThreadSynchronize();
cudaStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
cudaMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
cudaMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
cudaFree(regionZeroCuda);
cudaFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
|
3463d7b25e910d4f0073d8417ee21f38912724e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "sw_stage2_common.inc.cu"
int4 goal_found(int4 *d_4out, int blocks) {
static int4 h_4out[PHASE2_BLOCKS_COUNT];
cutilSafeCall(hipMemcpy(h_4out, d_4out, blocks*sizeof(int4), hipMemcpyDeviceToHost));
for (int k=0; k<blocks; k++) {
//printf("%d: %d %d %d %d : near goal\n", k, h_4out[k].x, h_4out[k].y, h_4out[k].w, h_4out[k].z);
if (h_4out[k].x > 0) {
printf("GOAL END!\n");
return h_4out[k];
}
}
return make_int4(0,0,0,0);
}
int4 match_found(int seq0_len, int4 *d_match_out, int j0r, int i0r, int goal, int baseXr, int step, bus_t* h_busBase, bus_t* d_busBase, bus_t* d_outV, int blocks) {
static int4 h_match_out[ALPHA];
dim3 grid(ALPHA, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
int i_pos = (seq0_len-i0r) - ALPHA*PHASE2_THREADS_COUNT*(step-blocks+1)-2; // TODO why does this only work with -2 ??
//printf("j_pos %d\n", j_pos);
//printf("seq0_len{%d} - i0{%d} + (step - blocks){%d} * PHASE2_THREADS_COUNT * ALPHA = %d\n", seq0_len, i0, (step - blocks), seq0_len - i0 + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA+1);
int adjust = 0;
if (i_pos < 0) {
adjust = -i_pos; // Do not access negative offset memory at h_busBase
}
if (PRINT) printf("i_pos %d (adjust: %d)\n", i_pos, adjust);
// TODO FIXME I think (ALPHA*PHASE2_THREADS_COUNT-adjust) should be (ALPHA*PHASE2_THREADS_COUNT) without the - adjust
cutilSafeCall( hipMemcpy(d_busBase, &h_busBase[i_pos+adjust], (ALPHA*PHASE2_THREADS_COUNT+1)*sizeof(bus_t), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_match) , dim3(grid), dim3(threads), 0 , 0, seq0_len, d_match_out, i0r + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA, goal, baseXr, d_busBase, d_outV + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA);
cutilSafeCall(hipMemcpy(h_match_out, d_match_out, ALPHA * sizeof (int4), hipMemcpyDeviceToHost));
for (int k = 0; k < ALPHA; k++) {
if (PRINT) printf("%d: %d %d %d %d : goal: %d\n", k, h_match_out[k].x, h_match_out[k].y, h_match_out[k].w, h_match_out[k].z, goal);
if (h_match_out[k].x != 0) {
return h_match_out[k];
}
}
return make_int4(0,0,0,0);
}
/*void init_score(const char* order_blosum, const char score_blosum[24][24], char out_score[128][128]) {
memset(out_score, 0, sizeof(out_score));
for (int i=0; order_blosum[i]; i++) {
for (int j=0; order_blosum[j]; j++) {
char c0 = order_blosum[i];
char c1 = order_blosum[j];
out_score[c0][c1] = score_blosum[i][j];
}
}
}*/
static map<int, SpecialRowWriter*> specialRows; // TODO move this declaration to the top of the file
static void unflush_bus(int i0r) {
// show content:
for (map<int, SpecialRowWriter*>::iterator it = specialRows.begin(); it != specialRows.end(); it++) {
int i = (*it).first;
SpecialRowWriter* row = (*it).second;
printf("Unflush: %08X,%08X (%X)\n", i0r, i, row);
/*if (row == NULL) {
printf("NULL row\n");
continue;
}*/
if (i >= i0r) {
row->cancel();
} else {
row->close();
}
delete row;
}
specialRows.clear();
}
static void flush_bus(int reverse, int blocks, int seq0_len, int seq1_len, int i0r, int j0r, Job* job, int step, int baseXr, int *h_split, bus_t* h_busH, bus_t* d_busH) {
#ifdef SW_BUS_FLUSH
if (job->flush_interval > 0) {
for (int k=0; k<blocks && k<=step; k++) {
int bx = k;
const int x0 = h_split[bx];
const int xLen = h_split[bx+1] - x0;
int by = step-bx;
int i=i0r + by*PHASE2_THREADS_COUNT*ALPHA;
if (by % job->flush_interval == 0 && by>0 && i<seq0_len) {
hipStreamSynchronize(0);
//printf("offset:%X+%d len:%d\n", d_busH, x0, xLen*sizeof(cell2_t));
cutilSafeCall(hipMemcpy(h_busH+x0, d_busH+x0, xLen*sizeof(cell2_t), hipMemcpyDeviceToHost));
hipStreamSynchronize(0);
int rowId;
int colId;
if (reverse) {
rowId = seq1_len-j0r;
colId = seq0_len-i;
} else {
rowId = i;
colId = j0r;
}
if (bx==0) {
SpecialRowWriter* row;
row = job->fopenNewSpecialRow(STAGE_2, rowId, colId);
specialRows[i] = row;
row->open();
printf("Flush: %08X,%08X (aux %08X,%08X)\n", rowId, colId, baseXr, i);
}
/*bus_t dummy;
dummy.x = 0x99999999;
dummy.y = 0x99999999;*/
//specialRows[i]->write(&dummy, 1);
specialRows[i]->write(&h_busH[x0], xLen);
//specialRows[i]->write(&dummy, 1);
if (bx==blocks-1) {
//printf("Close: %08X,%08X\n", baseY, j0-i);
SpecialRowWriter* row = specialRows[i];
specialRows.erase(i);
row->close();
delete row;
}
}
}
}
#endif
}
int4 findNearGoal(Job* job, cuda_structures_t* cuda, int reverse, int blocks, int i0r, int j0r, int baseXr, int baseX, int* h_split, bus_t* h_busH, int seq0_len, int seq1_len, int goal, int d, int start_type) {
printf("NEAR GOAL!!!\n");
dim3 grid(blocks, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
hipLaunchKernelGGL(( kernel_nw_short_phase<true>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
flush_bus(reverse, blocks, seq0_len, seq1_len, i0r, j0r, job, d, baseX, h_split, h_busH, cuda->d_busH);
hipLaunchKernelGGL(( kernel_nw_long_phase<true>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
return goal_found(cuda->d_4out, blocks);
}
void findFarGoal(Job* job, cuda_structures_t* cuda, int reverse, int blocks, int i0r, int j0r, int baseXr, int baseX, int* h_split, bus_t* h_busH, int seq0_len, int seq1_len, int goal, int d, int start_type) {
dim3 grid(blocks, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
//printf("KERNEL_NW_LARGE_QUICK(%d) xLen: %d xLen/B: %d height: %d\n", d, xLen, xLen/blocks, PHASE2_GRID_HEIGHT);
hipLaunchKernelGGL(( kernel_nw_short_phase<false>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
flush_bus(reverse, blocks, seq0_len, seq1_len, i0r, j0r, job, d, baseXr, h_split, h_busH, cuda->d_busH);
hipLaunchKernelGGL(( kernel_nw_long_phase<false>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
}
midpoint_t find_next_midpoint(Job* job, Sequence* seq_vertical, Sequence* seq_horizontal, int reverse, midpoint_t midpoint, host_structures_t &host, cuda_structures_t &cuda, SpecialRowReader* specialRow) {
midpoint_t next_midpoint;
const int seq0_len = seq_vertical->getLen();
const int seq1_len = seq_horizontal->getLen();
const int bus_size = seq1_len*sizeof(bus_t);
const int baseX = specialRow==NULL?0:specialRow->getRow();
/*if (specialRow != NULL) {
baseX = specialRow->getRow();
} else {
baseX = 0;
}*/
int i0r;
int j0r;
int baseXr;
if (reverse) {
i0r = seq0_len - midpoint.i;
j0r = seq1_len - midpoint.j;
baseXr = seq1_len - baseX;
if (baseXr > seq1_len) baseXr = seq1_len;
} else {
i0r = midpoint.i-1; // TODO VALIDATE
j0r = midpoint.j-1; // TODO VALIDATE
baseXr = baseX;
}
cutilSafeCall( hipMemset(cuda.d_4out, 0, sizeof(int4)));
const int xLen = baseXr-j0r+1; // inclusive baseX
{
dim3 threads(512,1,1);
dim3 blocks(PHASE2_BLOCKS_COUNT,1,1);
printf("kernel_initialize_busH<<<%d, %d>>>(..., j0r:%d, xLen:%d midpoint.type:%d)\n", threads.x, blocks.x, j0r, xLen, midpoint.type);
hipLaunchKernelGGL(( kernel_initialize_busH), dim3(threads), dim3(blocks), 0, 0, cuda.d_busH, j0r, xLen, midpoint.type);
cutilCheckMsg("Kernel execution failed");
/*cutilSafeCall ( hipMemcpy ( host.h_busH, cuda.d_busH+j0r, xLen * sizeof ( cell2_t ), hipMemcpyDeviceToHost ) );
for (int i=0; i<xLen; i++) {
printf("%02d ", host.h_busH[i]);
if (i%10 == 0) printf("\n");
}
printf("\n");*/
}
cutilSafeCall(hipBindTexture(0, t_busH, cuda.d_busH, bus_size));
int blocks = MULTIPROCESSORS*2;
if ( xLen <= 2*blocks*PHASE2_THREADS_COUNT ) {
blocks = xLen/2/PHASE2_THREADS_COUNT;
if (blocks > MULTIPROCESSORS) {
blocks = (blocks/MULTIPROCESSORS)*MULTIPROCESSORS;
}
if (blocks <= 1) {
blocks = 1;
}
}
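// Reading of the heuristic above: start from 2*MULTIPROCESSORS blocks, but if
// xLen is too narrow to give every block at least 2*PHASE2_THREADS_COUNT
// columns, shrink the count (rounding down to a multiple of MULTIPROCESSORS
// when possible, and never below 1).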
printf ( "SIZES xLen: %d B: %d xLen/B: %d 2*height: %d %s\n",
xLen, blocks, xLen/blocks, 2*PHASE2_THREADS_COUNT, blocks==1?"ERROR":"OK" );
int h = ( midpoint.i/PHASE2_THREADS_COUNT/ALPHA+blocks+1 ); // TODO validate
int pend1;
if (specialRow != NULL) {
specialRow->open ( host.h_busBase, midpoint.i );
pend1 = specialRow->read ( host.h_busBase, midpoint.i - ALPHA*PHASE2_THREADS_COUNT );
} else {
// TODO memset?
}
int h_split[PHASE2_BLOCKS_COUNT + 1];
createSplitPositions ( j0r, xLen, h_split, blocks );
cutilSafeCall ( hipMemcpyToSymbol ( d_split, h_split, sizeof ( h_split ) ) );
// TODO analyze boundary cases, e.g.: (i0,j0)=(1,1) - perfect match
int d;
dim3 threads( PHASE2_THREADS_COUNT, 1, 1);
printf ( "i0r: %d , j0r: %d, baseXr: %d\n", i0r, j0r, baseXr );
printf ( "i0r: %d , j0r: %d, baseXr: %d\n", seq0_len-i0r-1, seq1_len-j0r-1, seq1_len-baseXr-1 );
for ( d=0; d<h; d++ ) {
if ( blocks == 1 ) {
dim3 grid ( 1, 1, 1 );
if ( midpoint.score <= xLen*DNA_MATCH ) {
hipLaunchKernelGGL(( kernel_nw_single_phase<PHASE2_THREADS_COUNT, true>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, midpoint.score, cuda.d_4out, d, cuda.d_busH, cuda.d_busV1, cuda.d_busV2, cuda.d_busV3, cuda.d_outV, midpoint.type );
int4 found = goal_found ( cuda.d_4out , blocks);
if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y-1;
next_midpoint.j = seq1_len-found.x-1;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
printf ( "GOAL END (%d,%d)!\n", next_midpoint.i, next_midpoint.j );
break;
}
} else {
hipLaunchKernelGGL(( kernel_nw_single_phase<PHASE2_THREADS_COUNT, false>), dim3(grid), dim3(threads), 0, 0, i0r, j0r, baseXr, seq0_len, seq1_len, midpoint.score, cuda.d_4out, d, cuda.d_busH, cuda.d_busV1, cuda.d_busV2, cuda.d_busV3, cuda.d_outV, midpoint.type );
}
} else {
//printf ( "GOAL: %d MAX: %d\n", midpoint.score, ( xLen+1 ) *DNA_MATCH );
if ( midpoint.score <= ( xLen+1 ) *DNA_MATCH ) {
int4 found = findNearGoal ( job, &cuda, reverse, blocks, i0r, j0r, baseXr, baseX, h_split, host.h_busH, seq0_len, seq1_len, midpoint.score, d, midpoint.type );
if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y-1;
next_midpoint.j = seq1_len-found.x-1;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
printf ( "GOAL END (%d,%d)!\n", next_midpoint.i, next_midpoint.j );
break;
}
} else {
findFarGoal ( job, &cuda, reverse, blocks, i0r, j0r, baseXr, baseX, h_split, host.h_busH, seq0_len, seq1_len, midpoint.score, d, midpoint.type );
}
}
if (specialRow != NULL) {
pend1 = specialRow->read ( host.h_busBase, midpoint.i - ALPHA*PHASE2_THREADS_COUNT* ( d+2 ) -1 ); // TODO is the -1 needed?
} else {
// TODO memset?
}
//int blocks = PHASE2_BLOCKS_COUNT;
if ( d >= blocks ) {
hipStreamSynchronize ( 0 );
if ( PRINT ) {
printDebugMatch ( &cuda, baseXr, seq0_len, 0, reverse, i0r, j0r, seq_vertical, seq_horizontal, d, blocks, seq0_len, &host );
}
// TODO no need to test whether baseX==0, since it should already be found by the goal_found method
int4 found = match_found ( seq0_len, cuda.d_match_out, j0r, i0r, midpoint.score, baseXr, d, host.h_busBase, cuda.d_busBase, cuda.d_outV, blocks );
if ( found.x < 0 ) {
fprintf ( stderr, "ERROR: Backtrace lost! Can't continue." );
exit ( 1 );
} else if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y;
next_midpoint.j = seq1_len-found.x;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
break;
}
}
}
long long cells_updates = 0;
for (int i=1; i<=d && i<=blocks; i++) {
long long delta_h = h_split[i]-h_split[0];
cells_updates += delta_h * ALPHA * PHASE2_THREADS_COUNT;
}
if (d >= blocks) {
long long delta_h = h_split[blocks]-h_split[0];
cells_updates += (d-blocks+1)*delta_h * ALPHA * PHASE2_THREADS_COUNT;
}
job->cells_updates += cells_updates;
fprintf ( stdout, "D:%d BLOCKS:%d xLen: %d cells: %lld total(%.f mi)\n", d,blocks,xLen, cells_updates, job->cells_updates/1000000.0f);
if ( d == h ) {
fprintf ( stderr, "ERROR: Backtrace lost! End of matrix reached." );
exit ( 1 );
}
//cutilSafeCall(hipUnbindTexture(t_busH)); // TODO necessary?
return next_midpoint;
}
void stage2(Job* job) {
FILE* stats = job->fopenStatistics(STAGE_2);
stats = stderr;
job->loadMidpoints(0);
job->loadSpecialRows(STAGE_1);
job->cells_updates = 0;
int reverse = 1;
Sequence* seq_vertical;
Sequence* seq_horizontal;
if (reverse) {
seq_vertical = new Sequence(job->seq[1], reverse);
seq_horizontal = new Sequence(job->seq[0], reverse);
} else {
seq_vertical = new Sequence(job->seq[0], reverse);
seq_horizontal = new Sequence(job->seq[1], reverse);
}
/*printf("A: %X\n", seq_vertical);
printf("B: %d\n", seq_vertical->getLen());
printf("C: %c %d\n", seq_vertical->forward_data[0], strlen(seq_vertical->forward_data));
printf("D: %.5s\n", seq_vertical->forward_data+seq_vertical->getLen()-5);
printf("seq_vertical: (%d) %.5s ... %.5s\n", seq_vertical->getLen(), seq_vertical->forward_data, seq_vertical->forward_data+seq_vertical->getLen()-6);
printf("seq_horizontal: (%d) %.5s ... %.5s\n", seq_horizontal->getLen(), seq_horizontal->forward_data, seq_horizontal->forward_data+seq_horizontal->getLen()-5);*/
int seq0_len = seq_vertical->getLen();
int seq1_len = seq_horizontal->getLen();
MidpointsFile* midpointsFile = job->fopenMidpointsFile(1);
midpoint_t midpoint = job->midpoints.back();
if (reverse) {
int aux = midpoint.i;
midpoint.i = midpoint.j;
midpoint.j = aux;
} else {
//midpoint.i = job->phase2_i0;
//midpoint.j = job->phase2_j0;
}
//midpoint.score = job->phase2_max;
//midpoint.type = 0;
if (job->flush_limit > 0) {
int max_i;
int max_j;
job->getLargestSpecialRowInterval(STAGE_1, &max_i, &max_j);
// Necessary if we do not have any special row.
if (max_i == 0) {
max_i = midpoint.i;
}
int max_len = midpoint.j;
int maximum_recomended_flush_interval = max_i/4/(THREADS_COUNT*ALPHA);
fprintf(stats, "Maximum special row distance: %lld\n", max_i);
fprintf(stats, "Maximum recomended flush interval: %lld\n", maximum_recomended_flush_interval);
//job->flush_interval = 6;
job->flush_interval = (max_len*8LL/(job->flush_limit/max_i))/(THREADS_COUNT*ALPHA)+1; // TODO use a constant instead of 8
if (job->flush_interval > maximum_recomended_flush_interval) {
fprintf(stats, "Reducing Flush Interval from %lld to %lld\n", job->flush_interval, maximum_recomended_flush_interval);
// TODO commented out so as not to affect the tests
//job->flush_interval = maximum_recomended_flush_interval;
// TODO handle with a warning? with exit? as an adjustment? as an error? verify
// TODO do the same in phase 1?
}
long long special_lines_count = (max_len/(THREADS_COUNT*ALPHA*job->flush_interval));
fprintf(stats, "Special columns: %lld\n", special_lines_count);
fprintf(stats, "Total size: %lld\n", special_lines_count*max_i*8LL); // TODO 8*/
} else {
job->flush_interval = 0;
job->flush_limit = 0;
}
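// A rough reading of the sizing above: each flushed special line costs about
// max_i cells of 8 bytes, so the flush_limit byte budget appears to admit
// roughly flush_limit / (8 * max_i) of them, and flush_interval spaces those
// evenly over the ~max_len / (THREADS_COUNT*ALPHA) candidate flush positions
// (the +1 rounds up; an interval of zero disables flushing entirely).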
fprintf(stats, "Flush Interval: %d\n", job->flush_interval);
fprintf(stats, "Flush limit: %lld\n", job->flush_limit);
fprintf(stats, "SW PARAM: %d/%d/%d/%d\n", DNA_MATCH, DNA_MISMATCH, DNA_GAP_FIRST, DNA_GAP_EXT);
fprintf(stats, "--Alignment sequences:\n", job);
fprintf(stats, ">%s (%d)\n", job->seq[0].name.c_str(), seq0_len);
fprintf(stats, ">%s (%d)\n", job->seq[1].name.c_str(), seq1_len);
fflush(stats);
selectGPU(job->gpu, stats);
Timer timer2;
hipEvent_t ev_step = timer2.createEvent("STEP");
hipEvent_t ev_start = timer2.createEvent("START");
hipEvent_t ev_end = timer2.createEvent("END");
hipEvent_t ev_copy = timer2.createEvent("COPY");
hipEvent_t ev_alloc = timer2.createEvent("ALLOC");
hipEvent_t ev_kernel = timer2.createEvent("KERNEL");
hipEvent_t ev_writeback = timer2.createEvent("WRITEBACK");
printDevMem(stats);
timer2.eventRecord(ev_start);
host_structures_t host;
cuda_structures_t cuda;
allocHostStructures(seq_vertical, seq_horizontal, &host);
allocCudaStructures(seq_vertical, seq_horizontal, &cuda);
timer2.eventRecord(ev_copy);
timer2.eventRecord(ev_alloc);
int line_index=0;
midpointsFile->write(midpoint.j, midpoint.i, midpoint.score, midpoint.type);
while (midpoint.score > 0) {
fprintf(stdout, ">> %d %d %d\n", midpoint.j, midpoint.i, midpoint.score);
fprintf(stdout, "Millions Cells Updates: %.3f\n", job->cells_updates/1000000.0f);
SpecialRowReader* specialRow = job->fopenNextSpecialRow(STAGE_1, midpoint.j, midpoint.i, PHASE2_THREADS_COUNT, &line_index); // TODO swap j0 and i0?
midpoint = find_next_midpoint(job, seq_vertical, seq_horizontal, reverse, midpoint, host, cuda, specialRow);
if (specialRow != NULL) specialRow->close();
#ifdef SW_BUS_FLUSH
unflush_bus(seq0_len-midpoint.i-1);
#endif
midpointsFile->write(midpoint.j, midpoint.i, midpoint.score- (midpoint.type==0?0:DNA_GAP_OPEN), midpoint.type);
}
//////////
timer2.eventRecord(ev_kernel);
midpointsFile->close();
fprintf(stats, "CUDA times:\n");
float diff = timer2.printStatistics(stats);
fprintf(stats, " total: %.4f\n", diff);
fprintf(stats, " Mi.Cells: %.4e\n", (float)job->cells_updates);
fprintf(stats, " MCUPS: %.4f\n", job->cells_updates/1000000.0f/(diff/1000.0f));
printDevMem(stats);
fprintf(stats, " FreeCudaStructures\n");
freeCudaStructures(&cuda);
freeHostStructures(&host);
printDevMem(stats);
fclose(stats);
hipDeviceReset();
cutilCheckMsg("hipDeviceReset failed");
}
//#include "sw_stage3.cu" | 3463d7b25e910d4f0073d8417ee21f38912724e5.cu | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "sw_stage2_common.inc.cu"
int4 goal_found(int4 *d_4out, int blocks) {
static int4 h_4out[PHASE2_BLOCKS_COUNT];
cutilSafeCall(cudaMemcpy(h_4out, d_4out, blocks*sizeof(int4), cudaMemcpyDeviceToHost));
for (int k=0; k<blocks; k++) {
//printf("%d: %d %d %d %d : near goal\n", k, h_4out[k].x, h_4out[k].y, h_4out[k].w, h_4out[k].z);
if (h_4out[k].x > 0) {
printf("GOAL END!\n");
return h_4out[k];
}
}
return make_int4(0,0,0,0);
}
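// Tries to match the goal score against the base special row for the current diagonal step:
// copies the relevant slice of the row to the device, runs kernel_match over ALPHA blocks and
// returns the first non-zero hit (x, y, score, type), or zeros if no cell matched.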
int4 match_found(int seq0_len, int4 *d_match_out, int j0r, int i0r, int goal, int baseXr, int step, bus_t* h_busBase, bus_t* d_busBase, bus_t* d_outV, int blocks) {
static int4 h_match_out[ALPHA];
dim3 grid(ALPHA, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
int i_pos = (seq0_len-i0r) - ALPHA*PHASE2_THREADS_COUNT*(step-blocks+1)-2; // TODO why does this only work with -2 ??
//printf("j_pos %d\n", j_pos);
//printf("seq0_len{%d} - i0{%d} + (step - blocks){%d} * PHASE2_THREADS_COUNT * ALPHA = %d\n", seq0_len, i0, (step - blocks), seq0_len - i0 + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA+1);
int adjust = 0;
if (i_pos < 0) {
adjust = -i_pos; // Do not access negative offset memory at h_busBase
}
if (PRINT) printf("i_pos %d (adjust: %d)\n", i_pos, adjust);
// TODO FIXME I think (ALPHA*PHASE2_THREADS_COUNT-adjust) should be (ALPHA*PHASE2_THREADS_COUNT), without the - adjust
cutilSafeCall( cudaMemcpy(d_busBase, &h_busBase[i_pos+adjust], (ALPHA*PHASE2_THREADS_COUNT+1)*sizeof(bus_t), cudaMemcpyHostToDevice));
kernel_match <<< grid, threads, 0 >>>(seq0_len, d_match_out, i0r + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA, goal, baseXr, d_busBase, d_outV + (step - blocks) * PHASE2_THREADS_COUNT * ALPHA);
cutilSafeCall(cudaMemcpy(h_match_out, d_match_out, ALPHA * sizeof (int4), cudaMemcpyDeviceToHost));
for (int k = 0; k < ALPHA; k++) {
if (PRINT) printf("%d: %d %d %d %d : goal: %d\n", k, h_match_out[k].x, h_match_out[k].y, h_match_out[k].w, h_match_out[k].z, goal);
if (h_match_out[k].x != 0) {
return h_match_out[k];
}
}
return make_int4(0,0,0,0);
}
/*void init_score(const char* order_blosum, const char score_blosum[24][24], char out_score[128][128]) {
memset(out_score, 0, sizeof(out_score));
for (int i=0; order_blosum[i]; i++) {
for (int j=0; order_blosum[j]; j++) {
char c0 = order_blosum[i];
char c1 = order_blosum[j];
out_score[c0][c1] = score_blosum[i][j];
}
}
}*/
static map<int, SpecialRowWriter*> specialRows; // TODO move this declaration to the top of the file
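// Closes or cancels the special rows that are still open once the next midpoint is known:
// rows at or beyond i0r are cancelled (incomplete), earlier rows are closed normally.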
static void unflush_bus(int i0r) {
// show content:
for (map<int, SpecialRowWriter*>::iterator it = specialRows.begin(); it != specialRows.end(); it++) {
int i = (*it).first;
SpecialRowWriter* row = (*it).second;
printf("Unflush: %08X,%08X (%X)\n", i0r, i, row);
/*if (row == NULL) {
printf("NULL row\n");
continue;
}*/
if (i >= i0r) {
row->cancel();
} else {
row->close();
}
delete row;
}
specialRows.clear();
}
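// When SW_BUS_FLUSH is enabled and a flush interval is set, copies the horizontal bus of the
// finished block rows back to the host and appends it to the corresponding special-row file.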
static void flush_bus(int reverse, int blocks, int seq0_len, int seq1_len, int i0r, int j0r, Job* job, int step, int baseXr, int *h_split, bus_t* h_busH, bus_t* d_busH) {
#ifdef SW_BUS_FLUSH
if (job->flush_interval > 0) {
for (int k=0; k<blocks && k<=step; k++) {
int bx = k;
const int x0 = h_split[bx];
const int xLen = h_split[bx+1] - x0;
int by = step-bx;
int i=i0r + by*PHASE2_THREADS_COUNT*ALPHA;
if (by % job->flush_interval == 0 && by>0 && i<seq0_len) {
cudaStreamSynchronize(0);
//printf("offset:%X+%d len:%d\n", d_busH, x0, xLen*sizeof(cell2_t));
cutilSafeCall(cudaMemcpy(h_busH+x0, d_busH+x0, xLen*sizeof(cell2_t), cudaMemcpyDeviceToHost));
cudaStreamSynchronize(0);
int rowId;
int colId;
if (reverse) {
rowId = seq1_len-j0r;
colId = seq0_len-i;
} else {
rowId = i;
colId = j0r;
}
if (bx==0) {
SpecialRowWriter* row;
row = job->fopenNewSpecialRow(STAGE_2, rowId, colId);
specialRows[i] = row;
row->open();
printf("Flush: %08X,%08X (aux %08X,%08X)\n", rowId, colId, baseXr, i);
}
/*bus_t dummy;
dummy.x = 0x99999999;
dummy.y = 0x99999999;*/
//specialRows[i]->write(&dummy, 1);
specialRows[i]->write(&h_busH[x0], xLen);
//specialRows[i]->write(&dummy, 1);
if (bx==blocks-1) {
//printf("Close: %08X,%08X\n", baseY, j0-i);
SpecialRowWriter* row = specialRows[i];
specialRows.erase(i);
row->close();
delete row;
}
}
}
}
#endif
}
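// The goal score may be reachable inside this stripe: run the goal-checking (<true>) variants
// of the NW kernels for one diagonal step and return the goal cell if one of the blocks found it.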
int4 findNearGoal(Job* job, cuda_structures_t* cuda, int reverse, int blocks, int i0r, int j0r, int baseXr, int baseX, int* h_split, bus_t* h_busH, int seq0_len, int seq1_len, int goal, int d, int start_type) {
printf("NEAR GOAL!!!\n");
dim3 grid(blocks, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
kernel_nw_short_phase<true><<< grid, threads, 0>>>(i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
flush_bus(reverse, blocks, seq0_len, seq1_len, i0r, j0r, job, d, baseX, h_split, h_busH, cuda->d_busH);
kernel_nw_long_phase<true><<< grid, threads, 0>>>(i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
return goal_found(cuda->d_4out, blocks);
}
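// The goal score cannot be reached in this stripe yet: run the faster, non-checking (<false>)
// variants of the NW kernels for one diagonal step.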
void findFarGoal(Job* job, cuda_structures_t* cuda, int reverse, int blocks, int i0r, int j0r, int baseXr, int baseX, int* h_split, bus_t* h_busH, int seq0_len, int seq1_len, int goal, int d, int start_type) {
dim3 grid(blocks, 1, 1);
dim3 threads(PHASE2_THREADS_COUNT, 1, 1);
//printf("KERNEL_NW_LARGE_QUICK(%d) xLen: %d xLen/B: %d height: %d\n", d, xLen, xLen/blocks, PHASE2_GRID_HEIGHT);
kernel_nw_short_phase<false><<< grid, threads, 0>>>(i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
flush_bus(reverse, blocks, seq0_len, seq1_len, i0r, j0r, job, d, baseXr, h_split, h_busH, cuda->d_busH);
kernel_nw_long_phase<false><<< grid, threads, 0>>>(i0r, j0r, baseXr, seq0_len, seq1_len, goal, cuda->d_4out, d, cuda->d_busH, cuda->d_busV1, cuda->d_busV2, cuda->d_busV3, cuda->d_outV, start_type);
}
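// Advances the traceback by one midpoint: starting from the current midpoint, sweeps external
// diagonals until either the goal score is reached (goal_found) or the optimal path crosses the
// previous stage's special row (match_found), and returns the coordinates/score/type found there.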
midpoint_t find_next_midpoint(Job* job, Sequence* seq_vertical, Sequence* seq_horizontal, int reverse, midpoint_t midpoint, host_structures_t &host, cuda_structures_t &cuda, SpecialRowReader* specialRow) {
midpoint_t next_midpoint;
const int seq0_len = seq_vertical->getLen();
const int seq1_len = seq_horizontal->getLen();
const int bus_size = seq1_len*sizeof(bus_t);
const int baseX = specialRow==NULL?0:specialRow->getRow();
/*if (specialRow != NULL) {
baseX = specialRow->getRow();
} else {
baseX = 0;
}*/
int i0r;
int j0r;
int baseXr;
if (reverse) {
i0r = seq0_len - midpoint.i;
j0r = seq1_len - midpoint.j;
baseXr = seq1_len - baseX;
if (baseXr > seq1_len) baseXr = seq1_len;
} else {
i0r = midpoint.i-1; // TODO VALIDATE
j0r = midpoint.j-1; // TODO VALIDATE
baseXr = baseX;
}
cutilSafeCall( cudaMemset(cuda.d_4out, 0, sizeof(int4)));
const int xLen = baseXr-j0r+1; // inclusive baseX
{
dim3 threads(512,1,1);
dim3 blocks(PHASE2_BLOCKS_COUNT,1,1);
printf("kernel_initialize_busH<<<%d, %d>>>(..., j0r:%d, xLen:%d midpoint.type:%d)\n", threads.x, blocks.x, j0r, xLen, midpoint.type);
kernel_initialize_busH<<<threads, blocks>>>(cuda.d_busH, j0r, xLen, midpoint.type);
cutilCheckMsg("Kernel execution failed");
/*cutilSafeCall ( cudaMemcpy ( host.h_busH, cuda.d_busH+j0r, xLen * sizeof ( cell2_t ), cudaMemcpyDeviceToHost ) );
for (int i=0; i<xLen; i++) {
printf("%02d ", host.h_busH[i]);
if (i%10 == 0) printf("\n");
}
printf("\n");*/
}
cutilSafeCall(cudaBindTexture(0, t_busH, cuda.d_busH, bus_size));
int blocks = MULTIPROCESSORS*2;
if ( xLen <= 2*blocks*PHASE2_THREADS_COUNT ) {
blocks = xLen/2/PHASE2_THREADS_COUNT;
if (blocks > MULTIPROCESSORS) {
blocks = (blocks/MULTIPROCESSORS)*MULTIPROCESSORS;
}
if (blocks <= 1) {
blocks = 1;
}
}
printf ( "SIZES xLen: %d B: %d xLen/B: %d 2*height: %d %s\n",
xLen, blocks, xLen/blocks, 2*PHASE2_THREADS_COUNT, blocks==1?"ERROR":"OK" );
int h = ( midpoint.i/PHASE2_THREADS_COUNT/ALPHA+blocks+1 ); // TODO validate
int pend1;
if (specialRow != NULL) {
specialRow->open ( host.h_busBase, midpoint.i );
pend1 = specialRow->read ( host.h_busBase, midpoint.i - ALPHA*PHASE2_THREADS_COUNT );
} else {
// TODO memset?
}
int h_split[PHASE2_BLOCKS_COUNT + 1];
createSplitPositions ( j0r, xLen, h_split, blocks );
cutilSafeCall ( cudaMemcpyToSymbol ( d_split, h_split, sizeof ( h_split ) ) );
// TODO analyze borderline cases, e.g. (i0,j0)=(1,1) - perfect match
int d;
dim3 threads( PHASE2_THREADS_COUNT, 1, 1);
printf ( "i0r: %d , j0r: %d, baseXr: %d\n", i0r, j0r, baseXr );
printf ( "i0r: %d , j0r: %d, baseXr: %d\n", seq0_len-i0r-1, seq1_len-j0r-1, seq1_len-baseXr-1 );
for ( d=0; d<h; d++ ) {
if ( blocks == 1 ) {
dim3 grid ( 1, 1, 1 );
if ( midpoint.score <= xLen*DNA_MATCH ) {
kernel_nw_single_phase<PHASE2_THREADS_COUNT, true><<< grid, threads, 0>>> ( i0r, j0r, baseXr, seq0_len, seq1_len, midpoint.score, cuda.d_4out, d, cuda.d_busH, cuda.d_busV1, cuda.d_busV2, cuda.d_busV3, cuda.d_outV, midpoint.type );
int4 found = goal_found ( cuda.d_4out , blocks);
if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y-1;
next_midpoint.j = seq1_len-found.x-1;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
printf ( "GOAL END (%d,%d)!\n", next_midpoint.i, next_midpoint.j );
break;
}
} else {
kernel_nw_single_phase<PHASE2_THREADS_COUNT, false><<< grid, threads, 0>>> ( i0r, j0r, baseXr, seq0_len, seq1_len, midpoint.score, cuda.d_4out, d, cuda.d_busH, cuda.d_busV1, cuda.d_busV2, cuda.d_busV3, cuda.d_outV, midpoint.type );
}
} else {
//printf ( "GOAL: %d MAX: %d\n", midpoint.score, ( xLen+1 ) *DNA_MATCH );
if ( midpoint.score <= ( xLen+1 ) *DNA_MATCH ) {
int4 found = findNearGoal ( job, &cuda, reverse, blocks, i0r, j0r, baseXr, baseX, h_split, host.h_busH, seq0_len, seq1_len, midpoint.score, d, midpoint.type );
if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y-1;
next_midpoint.j = seq1_len-found.x-1;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
printf ( "GOAL END (%d,%d)!\n", next_midpoint.i, next_midpoint.j );
break;
}
} else {
findFarGoal ( job, &cuda, reverse, blocks, i0r, j0r, baseXr, baseX, h_split, host.h_busH, seq0_len, seq1_len, midpoint.score, d, midpoint.type );
}
}
if (specialRow != NULL) {
pend1 = specialRow->read ( host.h_busBase, midpoint.i - ALPHA*PHASE2_THREADS_COUNT* ( d+2 ) -1 ); // TODO is the -1 needed?
} else {
// TODO memset?
}
//int blocks = PHASE2_BLOCKS_COUNT;
if ( d >= blocks ) {
cudaStreamSynchronize ( 0 );
if ( PRINT ) {
printDebugMatch ( &cuda, baseXr, seq0_len, 0, reverse, i0r, j0r, seq_vertical, seq_horizontal, d, blocks, seq0_len, &host );
}
// TODO no need to test whether baseX==0, since it should already be found by the goal_found method
int4 found = match_found ( seq0_len, cuda.d_match_out, j0r, i0r, midpoint.score, baseXr, d, host.h_busBase, cuda.d_busBase, cuda.d_outV, blocks );
if ( found.x < 0 ) {
fprintf ( stderr, "ERROR: Backtrace lost! Can't continue." );
exit ( 1 );
} else if ( found.x > 0 ) {
next_midpoint.i = seq0_len-found.y;
next_midpoint.j = seq1_len-found.x;
next_midpoint.score = found.w;
next_midpoint.type = found.z;
break;
}
}
}
long long cells_updates = 0;
for (int i=1; i<=d && i<=blocks; i++) {
long long delta_h = h_split[i]-h_split[0];
cells_updates += delta_h * ALPHA * PHASE2_THREADS_COUNT;
}
if (d >= blocks) {
long long delta_h = h_split[blocks]-h_split[0];
cells_updates += (d-blocks+1)*delta_h * ALPHA * PHASE2_THREADS_COUNT;
}
job->cells_updates += cells_updates;
fprintf ( stdout, "D:%d BLOCKS:%d xLen: %d cells: %lld total(%.f mi)\n", d,blocks,xLen, cells_updates, job->cells_updates/1000000.0f);
if ( d == h ) {
fprintf ( stderr, "ERROR: Backtrace lost! End of matrix reached." );
exit ( 1 );
}
//cutilSafeCall(cudaUnbindTexture(t_busH)); // TODO necessary?
return next_midpoint;
}
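// Stage 2 entry point: loads the stage-1 midpoints and special rows, then repeatedly calls
// find_next_midpoint to refine the midpoint list until the alignment score drops to zero.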
void stage2(Job* job) {
FILE* stats = job->fopenStatistics(STAGE_2);
stats = stderr;
job->loadMidpoints(0);
job->loadSpecialRows(STAGE_1);
job->cells_updates = 0;
int reverse = 1;
Sequence* seq_vertical;
Sequence* seq_horizontal;
if (reverse) {
seq_vertical = new Sequence(job->seq[1], reverse);
seq_horizontal = new Sequence(job->seq[0], reverse);
} else {
seq_vertical = new Sequence(job->seq[0], reverse);
seq_horizontal = new Sequence(job->seq[1], reverse);
}
/*printf("A: %X\n", seq_vertical);
printf("B: %d\n", seq_vertical->getLen());
printf("C: %c %d\n", seq_vertical->forward_data[0], strlen(seq_vertical->forward_data));
printf("D: %.5s\n", seq_vertical->forward_data+seq_vertical->getLen()-5);
printf("seq_vertical: (%d) %.5s ... %.5s\n", seq_vertical->getLen(), seq_vertical->forward_data, seq_vertical->forward_data+seq_vertical->getLen()-6);
printf("seq_horizontal: (%d) %.5s ... %.5s\n", seq_horizontal->getLen(), seq_horizontal->forward_data, seq_horizontal->forward_data+seq_horizontal->getLen()-5);*/
int seq0_len = seq_vertical->getLen();
int seq1_len = seq_horizontal->getLen();
MidpointsFile* midpointsFile = job->fopenMidpointsFile(1);
midpoint_t midpoint = job->midpoints.back();
if (reverse) {
int aux = midpoint.i;
midpoint.i = midpoint.j;
midpoint.j = aux;
} else {
//midpoint.i = job->phase2_i0;
//midpoint.j = job->phase2_j0;
}
//midpoint.score = job->phase2_max;
//midpoint.type = 0;
if (job->flush_limit > 0) {
int max_i;
int max_j;
job->getLargestSpecialRowInterval(STAGE_1, &max_i, &max_j);
// Necessary if we do not have any special row.
if (max_i == 0) {
max_i = midpoint.i;
}
int max_len = midpoint.j;
int maximum_recomended_flush_interval = max_i/4/(THREADS_COUNT*ALPHA);
fprintf(stats, "Maximum special row distance: %lld\n", max_i);
fprintf(stats, "Maximum recomended flush interval: %lld\n", maximum_recomended_flush_interval);
//job->flush_interval = 6;
job->flush_interval = (max_len*8LL/(job->flush_limit/max_i))/(THREADS_COUNT*ALPHA)+1; // TODO use a named constant instead of 8
if (job->flush_interval > maximum_recomended_flush_interval) {
fprintf(stats, "Reducing Flush Interval from %lld to %lld\n", job->flush_interval, maximum_recomended_flush_interval);
// TODO commented out so as not to affect the tests
//job->flush_interval = maximum_recomended_flush_interval;
// TODO handle this with a warning? with exit? as an adjustment? as an error? verify
// TODO do the same in phase 1?
}
long long special_lines_count = (max_len/(THREADS_COUNT*ALPHA*job->flush_interval));
fprintf(stats, "Special columns: %lld\n", special_lines_count);
fprintf(stats, "Total size: %lld\n", special_lines_count*max_i*8LL); // TODO 8*/
} else {
job->flush_interval = 0;
job->flush_limit = 0;
}
fprintf(stats, "Flush Interval: %d\n", job->flush_interval);
fprintf(stats, "Flush limit: %lld\n", job->flush_limit);
fprintf(stats, "SW PARAM: %d/%d/%d/%d\n", DNA_MATCH, DNA_MISMATCH, DNA_GAP_FIRST, DNA_GAP_EXT);
fprintf(stats, "--Alignment sequences:\n", job);
fprintf(stats, ">%s (%d)\n", job->seq[0].name.c_str(), seq0_len);
fprintf(stats, ">%s (%d)\n", job->seq[1].name.c_str(), seq1_len);
fflush(stats);
selectGPU(job->gpu, stats);
Timer timer2;
cudaEvent_t ev_step = timer2.createEvent("STEP");
cudaEvent_t ev_start = timer2.createEvent("START");
cudaEvent_t ev_end = timer2.createEvent("END");
cudaEvent_t ev_copy = timer2.createEvent("COPY");
cudaEvent_t ev_alloc = timer2.createEvent("ALLOC");
cudaEvent_t ev_kernel = timer2.createEvent("KERNEL");
cudaEvent_t ev_writeback = timer2.createEvent("WRITEBACK");
printDevMem(stats);
timer2.eventRecord(ev_start);
host_structures_t host;
cuda_structures_t cuda;
allocHostStructures(seq_vertical, seq_horizontal, &host);
allocCudaStructures(seq_vertical, seq_horizontal, &cuda);
timer2.eventRecord(ev_copy);
timer2.eventRecord(ev_alloc);
int line_index=0;
midpointsFile->write(midpoint.j, midpoint.i, midpoint.score, midpoint.type);
while (midpoint.score > 0) {
fprintf(stdout, ">> %d %d %d\n", midpoint.j, midpoint.i, midpoint.score);
fprintf(stdout, "Millions Cells Updates: %.3f\n", job->cells_updates/1000000.0f);
SpecialRowReader* specialRow = job->fopenNextSpecialRow(STAGE_1, midpoint.j, midpoint.i, PHASE2_THREADS_COUNT, &line_index); // TODO swap j0 with i0?
midpoint = find_next_midpoint(job, seq_vertical, seq_horizontal, reverse, midpoint, host, cuda, specialRow);
if (specialRow != NULL) specialRow->close();
#ifdef SW_BUS_FLUSH
unflush_bus(seq0_len-midpoint.i-1);
#endif
midpointsFile->write(midpoint.j, midpoint.i, midpoint.score- (midpoint.type==0?0:DNA_GAP_OPEN), midpoint.type);
}
//////////
timer2.eventRecord(ev_kernel);
midpointsFile->close();
fprintf(stats, "CUDA times:\n");
float diff = timer2.printStatistics(stats);
fprintf(stats, " total: %.4f\n", diff);
fprintf(stats, " Mi.Cells: %.4e\n", (float)job->cells_updates);
fprintf(stats, " MCUPS: %.4f\n", job->cells_updates/1000000.0f/(diff/1000.0f));
printDevMem(stats);
fprintf(stats, " FreeCudaStructures\n");
freeCudaStructures(&cuda);
freeHostStructures(&host);
printDevMem(stats);
fclose(stats);
cudaThreadExit();
cutilCheckMsg("cudaThreadExit failed");
}
//#include "sw_stage3.cu" |
54b37a145426cff925b13e122e490de7ca76a803.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <jitify_helper.cuh>
#include <kernels/gauge_stout.cuh>
#include <instantiate.h>
namespace quda {
template <typename Float, int nColor, QudaReconstructType recon> class GaugeSTOUT : TunableVectorYZ
{
static constexpr int stoutDim = 3; // apply stouting in space only
GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg;
const GaugeField &meta;
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
// (2,3): 2 for parity in the y thread dim, 3 corresponds to mapping direction to the z thread dim
GaugeSTOUT(GaugeField &out, const GaugeField &in, double rho) :
TunableVectorYZ(2, stoutDim),
arg(out, in, rho),
meta(in)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
#ifdef JITIFY
create_jitify_program("kernels/gauge_stout.cuh");
#endif
apply(0);
qudaDeviceSynchronize();
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeSTOUTStep").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
hipLaunchKernelGGL(( computeSTOUTStep), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, 0, arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { arg.out.save(); } // defensive measure in case they alias
void postTune() { arg.out.load(); }
long long flops() const { return 3 * (2 + 2 * 4) * 198ll * arg.threads; } // just counts matrix multiplication
long long bytes() const { return 3 * ((1 + 2 * 6) * arg.in.Bytes() + arg.out.Bytes()) * arg.threads; }
}; // GaugeSTOUT
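// Host-side entry point for one stout-smearing step: checks that the input and output fields
// are compatible and native-ordered, then instantiates GaugeSTOUT for the field's precision,
// number of colors and reconstruction type.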
void STOUTStep(GaugeField &out, const GaugeField &in, double rho)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(out, in);
checkReconstruct(out, in);
if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct());
if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct());
instantiate<GaugeSTOUT>(out, in, rho);
#else
errorQuda("Gauge tools are not built");
#endif
}
template <typename Float, int nColor, QudaReconstructType recon> class GaugeOvrImpSTOUT : TunableVectorYZ
{
static constexpr int stoutDim = 4; // apply stouting in all four dimensions
GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg;
const GaugeField &meta;
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugeOvrImpSTOUT(GaugeField &out, const GaugeField &in, double rho, double epsilon) :
TunableVectorYZ(2, stoutDim),
arg(out, in, rho, epsilon),
meta(in)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
#ifdef JITIFY
create_jitify_program("kernels/gauge_stout.cuh");
#endif
apply(0);
qudaDeviceSynchronize();
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeOvrImpSTOUTStep").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
hipLaunchKernelGGL(( computeOvrImpSTOUTStep), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, 0, arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { arg.out.save(); } // defensive measure in case they alias
void postTune() { arg.out.load(); }
long long flops() const { return 4*(18+2+2*4)*198ll*arg.threads; } // just counts matrix multiplication
long long bytes() const { return 4*((1+2*12)*arg.in.Bytes()+arg.out.Bytes())*arg.threads; }
}; // GaugeOvrImpSTOUT
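// Host-side entry point for one over-improved stout step (four-dimensional smearing with the
// additional epsilon parameter); performs the same field checks before instantiation.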
void OvrImpSTOUTStep(GaugeField &out, const GaugeField& in, double rho, double epsilon)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(out, in);
checkReconstruct(out, in);
if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct());
if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct());
instantiate<GaugeOvrImpSTOUT>(out, in, rho, epsilon);
#else
errorQuda("Gauge tools are not built");
#endif
}
}
| 54b37a145426cff925b13e122e490de7ca76a803.cu | #include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <jitify_helper.cuh>
#include <kernels/gauge_stout.cuh>
#include <instantiate.h>
namespace quda {
template <typename Float, int nColor, QudaReconstructType recon> class GaugeSTOUT : TunableVectorYZ
{
static constexpr int stoutDim = 3; // apply stouting in space only
GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg;
const GaugeField &meta;
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
// (2,3): 2 for parity in the y thread dim, 3 corresponds to mapping direction to the z thread dim
GaugeSTOUT(GaugeField &out, const GaugeField &in, double rho) :
TunableVectorYZ(2, stoutDim),
arg(out, in, rho),
meta(in)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
#ifdef JITIFY
create_jitify_program("kernels/gauge_stout.cuh");
#endif
apply(0);
qudaDeviceSynchronize();
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeSTOUTStep").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
computeSTOUTStep<<<tp.grid, tp.block, tp.shared_bytes>>>(arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { arg.out.save(); } // defensive measure in case they alias
void postTune() { arg.out.load(); }
long long flops() const { return 3 * (2 + 2 * 4) * 198ll * arg.threads; } // just counts matrix multiplication
long long bytes() const { return 3 * ((1 + 2 * 6) * arg.in.Bytes() + arg.out.Bytes()) * arg.threads; }
}; // GaugeSTOUT
void STOUTStep(GaugeField &out, const GaugeField &in, double rho)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(out, in);
checkReconstruct(out, in);
if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct());
if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct());
instantiate<GaugeSTOUT>(out, in, rho);
#else
errorQuda("Gauge tools are not built");
#endif
}
template <typename Float, int nColor, QudaReconstructType recon> class GaugeOvrImpSTOUT : TunableVectorYZ
{
static constexpr int stoutDim = 4; // apply stouting in all four dimensions
GaugeSTOUTArg<Float, nColor, recon, stoutDim> arg;
const GaugeField &meta;
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugeOvrImpSTOUT(GaugeField &out, const GaugeField &in, double rho, double epsilon) :
TunableVectorYZ(2, stoutDim),
arg(out, in, rho, epsilon),
meta(in)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
#ifdef JITIFY
create_jitify_program("kernels/gauge_stout.cuh");
#endif
apply(0);
qudaDeviceSynchronize();
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeOvrImpSTOUTStep").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
computeOvrImpSTOUTStep<<<tp.grid, tp.block, tp.shared_bytes>>>(arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
void preTune() { arg.out.save(); } // defensive measure in case they alias
void postTune() { arg.out.load(); }
long long flops() const { return 4*(18+2+2*4)*198ll*arg.threads; } // just counts matrix multiplication
long long bytes() const { return 4*((1+2*12)*arg.in.Bytes()+arg.out.Bytes())*arg.threads; }
}; // GaugeOvrImpSTOUT
void OvrImpSTOUTStep(GaugeField &out, const GaugeField& in, double rho, double epsilon)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(out, in);
checkReconstruct(out, in);
if (!out.isNative()) errorQuda("Order %d with %d reconstruct not supported", in.Order(), in.Reconstruct());
if (!in.isNative()) errorQuda("Order %d with %d reconstruct not supported", out.Order(), out.Reconstruct());
instantiate<GaugeOvrImpSTOUT>(out, in, rho, epsilon);
#else
errorQuda("Gauge tools are not built");
#endif
}
}
|
4bba0e8d02ee17d1282747ff0aca9fcef597f18b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include <limits.h>
namespace cv { namespace cuda { namespace device
{
namespace stereobm
{
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
#define ROWSperTHREAD 21 // the number of rows a thread will process
#define BLOCK_W 128 // the thread block width (464)
#define N_DISPARITIES 8
#define STEREO_MIND 0 // The minimum d range to check
#define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing
__device__ __forceinline__ int SQ(int a)
{
return a * a;
}
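// Sums a (2*RADIUS+1)-column window of per-column SSDs centered at X; the shared cache lets a
// thread reuse the partial sum already computed by the thread RADIUS lanes to its right.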
template<int RADIUS>
__device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth)
{
unsigned int cache = 0;
unsigned int cache2 = 0;
if (X < cwidth - RADIUS)
{
for(int i = 1; i <= RADIUS; i++)
cache += col_ssd[i];
}
col_ssd_cache[0] = cache;
__syncthreads();
if (X < cwidth - RADIUS)
{
if (threadIdx.x < BLOCK_W - RADIUS)
cache2 = col_ssd_cache[RADIUS];
else
for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
cache2 += col_ssd[i];
}
return col_ssd[0] + cache + cache2;
}
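// Evaluates the window SSD for the eight disparities handled in this pass and returns
// (minimum SSD, index of the winning disparity).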
template<int RADIUS>
__device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth, unsigned int* ssd)
{
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS), X, cwidth);
int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));
int bestIdx = 0;
for (int i = 0; i < N_DISPARITIES; i++)
{
if (mssd == ssd[i])
bestIdx = i;
}
return make_uint2(mssd, bestIdx);
}
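// Slides the window down one row for all eight disparities: subtracts the squared difference
// of the row leaving the window (idx1) and adds the row entering it (idx2).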
template<int RADIUS>
__device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
unsigned char leftPixel2;
unsigned char rightPixel1[8];
unsigned char rightPixel2[8];
unsigned int diff1, diff2;
leftPixel1 = imageL[idx1];
leftPixel2 = imageL[idx2];
idx1 = idx1 - d;
idx2 = idx2 - d;
rightPixel1[7] = imageR[idx1 - 7];
rightPixel1[0] = imageR[idx1 - 0];
rightPixel1[1] = imageR[idx1 - 1];
rightPixel1[2] = imageR[idx1 - 2];
rightPixel1[3] = imageR[idx1 - 3];
rightPixel1[4] = imageR[idx1 - 4];
rightPixel1[5] = imageR[idx1 - 5];
rightPixel1[6] = imageR[idx1 - 6];
rightPixel2[7] = imageR[idx2 - 7];
rightPixel2[0] = imageR[idx2 - 0];
rightPixel2[1] = imageR[idx2 - 1];
rightPixel2[2] = imageR[idx2 - 2];
rightPixel2[3] = imageR[idx2 - 3];
rightPixel2[4] = imageR[idx2 - 4];
rightPixel2[5] = imageR[idx2 - 5];
rightPixel2[6] = imageR[idx2 - 6];
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
diff1 = leftPixel1 - rightPixel1[0];
diff2 = leftPixel2 - rightPixel2[0];
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[1];
diff2 = leftPixel2 - rightPixel2[1];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[2];
diff2 = leftPixel2 - rightPixel2[2];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[3];
diff2 = leftPixel2 - rightPixel2[3];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[4];
diff2 = leftPixel2 - rightPixel2[4];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[5];
diff2 = leftPixel2 - rightPixel2[5];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[6];
diff2 = leftPixel2 - rightPixel2[6];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[7];
diff2 = leftPixel2 - rightPixel2[7];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
}
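// Builds the initial per-column SSDs for the top window position by accumulating squared
// left/right differences over the first 2*RADIUS+1 rows.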
template<int RADIUS>
__device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
int idx;
unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};
for(int i = 0; i < (2 * RADIUS + 1); i++)
{
idx = y_tex * im_pitch + x_tex;
leftPixel1 = imageL[idx];
idx = idx - d;
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);
y_tex += 1;
}
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
}
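// Main block-matching kernel: each thread owns one output column and up to ROWSperTHREAD rows,
// sweeping the disparity range in batches of N_DISPARITIES while keeping running column SSDs
// in shared memory and applying the uniqueness-ratio test on the fly.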
template<int RADIUS>
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp,
int uniquenessRatio, unsigned int* cminSSDImage, size_t cminSSD_step, int cwidth, int cheight)
{
extern __shared__ unsigned int col_ssd_cache[];
uint line_ssds[2 + N_DISPARITIES]; // +2 - tail of previous batch for accurate uniquenessRatio check
uint* batch_ssds = line_ssds + 2;
uint line_ssd_tails[3*ROWSperTHREAD];
uchar uniqueness_approved[ROWSperTHREAD];
uchar local_disparity[ROWSperTHREAD];
volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0;
const int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
const int Y = (blockIdx.y * ROWSperTHREAD + RADIUS);
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
unsigned char* disparImage = disp.data + X + Y * disp.step;
float thresh_scale;
int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
int y_tex;
int x_tex = X - RADIUS;
if (x_tex >= cwidth)
return;
for(int i = 0; i < ROWSperTHREAD; i++)
local_disparity[i] = 0;
for(int i = 0; i < 3*ROWSperTHREAD; i++)
{
line_ssd_tails[i] = UINT_MAX;
}
if (uniquenessRatio > 0)
{
batch_ssds[6] = UINT_MAX;
batch_ssds[7] = UINT_MAX;
thresh_scale = (1.0 + uniquenessRatio / 100.0f);
for(int i = 0; i < ROWSperTHREAD; i++)
{
uniqueness_approved[i] = 1;
}
}
for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
{
y_tex = Y - RADIUS;
InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);
if (col_ssd_extra != nullptr)
if (x_tex + BLOCK_W < cwidth)
InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);
__syncthreads(); //before MinSSD function
if (Y < cheight - RADIUS)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*0 + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*0 + 1];
line_ssds[1] = line_ssd_tails[3*0 + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[0];
if(batch_opt.x < last_opt)
{
uniqueness_approved[0] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[0] < dtest-1 || local_disparity[0] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[0] = 0;
}
}
if(uniqueness_approved[0])
{
// the trial to decompose the code on 2 loops without ld vs dtest makes
// uniqueness check dramatically slow. at least on gf 1080
for (int ld = d-2; ld < d + N_DISPARITIES; ld++)
{
if ((ld < dtest-1 || ld > dtest+1) && (line_ssds[ld-d+2] <= thresh))
{
uniqueness_approved[0] = 0;
break;
}
}
}
line_ssd_tails[3*0 + 1] = batch_ssds[6];
line_ssd_tails[3*0 + 2] = batch_ssds[7];
}
line_ssd_tails[3*0 + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[0] = (unsigned char)(d + batch_opt.y);
}
}
}
for(int row = 1; row < end_row; row++)
{
int idx1 = y_tex * img_step + x_tex;
int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;
__syncthreads();
StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);
if (col_ssd_extra)
if (x_tex + BLOCK_W < cwidth)
StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);
y_tex += 1;
__syncthreads();
if (row < cheight - RADIUS - Y)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*row + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*row + 1];
line_ssds[1] = line_ssd_tails[3*row + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[row];
if(batch_opt.x < last_opt)
{
uniqueness_approved[row] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[row] < dtest-1 || local_disparity[row] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[row] = 0;
}
}
if(uniqueness_approved[row])
{
for (int ld = 0; ld < N_DISPARITIES + 2; ld++)
{
if (((d+ld-2 < dtest-1) || (d+ld-2 > dtest+1)) && (line_ssds[ld] <= thresh))
{
uniqueness_approved[row] = 0;
break;
}
}
}
line_ssd_tails[3*row + 1] = batch_ssds[6];
line_ssd_tails[3*row + 2] = batch_ssds[7];
}
line_ssd_tails[3*row + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[row] = (unsigned char)(d + batch_opt.y);
}
}
}
} // for row loop
__syncthreads(); // before initializing shared memory at the beginning of next loop
} // for d loop
for (int row = 0; row < end_row; row++)
{
minSSDImage[row * cminSSD_step] = line_ssd_tails[3*row + 0];
}
if (uniquenessRatio > 0)
{
for (int row = 0; row < end_row; row++)
{
// drop disparity for pixel where uniqueness requirement was not satisfied (zero value)
disparImage[disp.step * row] = local_disparity[row] * uniqueness_approved[row];
}
}
else
{
for (int row = 0; row < end_row; row++)
{
disparImage[disp.step * row] = local_disparity[row];
}
}
}
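// Launches stereoKernel for a fixed window RADIUS, sizing the grid over the valid image area
// and allocating shared memory for the column-SSD cache.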
template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, hipStream_t & stream)
{
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);
hipLaunchKernelGGL(( stereoKernel<RADIUS>), dim3(grid), dim3(threads), smem_size, stream, left.data, right.data, left.step, disp, maxdisp, uniquenessRatio,
missd_buffer, minssd_step, cwidth, cheight);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
};
typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, hipStream_t & stream);
const static kernel_caller_t callers[] =
{
0,
kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>
//0,0,0, 0,0,0, 0,0,kernel_caller<9>
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp,
int winsz, int uniquenessRatio, const PtrStepSz<unsigned int>& minSSD_buf, hipStream_t& stream)
{
int winsz2 = winsz >> 1;
if (winsz2 == 0 || winsz2 >= calles_num)
CV_Error(cv::Error::StsBadArg, "Unsupported window size");
cudaSafeCall( hipMemset2DAsync(disp.data, disp.step, 0, disp.cols, disp.rows, stream) );
cudaSafeCall( hipMemset2DAsync(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows, stream) );
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
callers[winsz2](left, right, disp, maxdisp, uniquenessRatio, minSSD_buf.data, minssd_step, left.cols, left.rows, stream);
}
__device__ inline int clamp(int x, int a, int b)
{
return ::max(a, ::min(b, x));
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Sobel Prefiler ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_xsobel(PtrStepSzb input, PtrStepSzb output, int prefilterCap)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < output.cols && y < output.rows)
{
int conv = input.ptr(::max(0,y-1))[::max(0,x-1)] * (-1) + input.ptr(::max(0, y-1))[::min(x+1, input.cols-1)] * (1) +
input.ptr(y )[::max(0,x-1)] * (-2) + input.ptr(y )[::min(x+1, input.cols-1)] * (2) +
input.ptr(::min(y+1, input.rows-1))[::max(0,x-1)] * (-1) + input.ptr(::min(y+1, input.rows-1))[::min(x+1,input.cols-1)] * (1);
conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
output.ptr(y)[x] = conv & 0xFF;
}
}
void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, hipStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
hipLaunchKernelGGL(( prefilter_kernel_xsobel), dim3(grid), dim3(threads), 0, stream, input, output, prefilterCap);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Norm Prefiler ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_norm(PtrStepSzb input, PtrStepSzb output, int prefilterCap, int scale_g, int scale_s, int winsize)
{
// prefilterCap in range 1..63, checked in StereoBMImpl::compute
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int cols = input.cols;
int rows = input.rows;
int WSZ2 = winsize / 2;
if(x < cols && y < rows)
{
int cov1 = input.ptr(::max(y-1, 0))[x] * 1 +
input.ptr(y)[::min(x+1, cols-1)] * 1 + input.ptr(y )[x] * 4 + input.ptr(y)[::min(x+1, cols-1)] * 1 +
input.ptr(::min(y+1, rows-1))[x] * 1;
int cov2 = 0;
for(int i = -WSZ2; i < WSZ2+1; i++)
for(int j = -WSZ2; j < WSZ2+1; j++)
cov2 += input.ptr(clamp(y+i, 0, rows-1))[clamp(x+j, 0, cols-1)];
int res = (cov1*scale_g - cov2*scale_s)>>10;
res = clamp(res, -prefilterCap, prefilterCap) + prefilterCap;
output.ptr(y)[x] = res;
}
}
void prefilter_norm(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, int winsize, hipStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
scale_g *= scale_s;
hipLaunchKernelGGL(( prefilter_kernel_norm), dim3(grid), dim3(threads), 0, stream, input, output, prefilterCap, scale_g, scale_s, winsize);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Textureness filtering ////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
texture<unsigned char, 2, hipReadModeNormalizedFloat> texForTF;
__device__ __forceinline__ float sobel(int x, int y)
{
float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) +
tex2D(texForTF, x - 1, y ) * (-2) + tex2D(texForTF, x + 1, y ) * (2) +
tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1);
return fabs(conv);
}
__device__ float CalcSums(float *cols, float *cols_cache, int winsz)
{
float cache = 0;
float cache2 = 0;
int winsz2 = winsz/2;
for(int i = 1; i <= winsz2; i++)
cache += cols[i];
cols_cache[0] = cache;
__syncthreads();
if (threadIdx.x < blockDim.x - winsz2)
cache2 = cols_cache[winsz2];
else
for(int i = winsz2 + 1; i < winsz; i++)
cache2 += cols[i];
return cols[0] + cache + cache2;
}
#define RpT (2 * ROWSperTHREAD) // got experimentally
__global__ void textureness_kernel(PtrStepSzb disp, int winsz, float threshold)
{
int winsz2 = winsz/2;
int n_dirty_pixels = (winsz2) * 2;
extern __shared__ float cols_cache[];
float *cols = cols_cache + blockDim.x + threadIdx.x;
float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int beg_row = blockIdx.y * RpT;
int end_row = ::min(beg_row + RpT, disp.rows);
if (x < disp.cols)
{
int y = beg_row;
float sum = 0;
float sum_extra = 0;
for(int i = y - winsz2; i <= y + winsz2; ++i)
{
sum += sobel(x - winsz2, i);
if (cols_extra)
sum_extra += sobel(x + blockDim.x - winsz2, i);
}
*cols = sum;
if (cols_extra)
*cols_extra = sum_extra;
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
for(int y = beg_row + 1; y < end_row; ++y)
{
sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2);
*cols = sum;
if (cols_extra)
{
sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2);
*cols_extra = sum_extra;
}
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
}
}
}
void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, hipStream_t & stream)
{
avgTexturenessThreshold *= winsz * winsz;
texForTF.filterMode = hipFilterModeLinear;
texForTF.addressMode[0] = hipAddressModeWrap;
texForTF.addressMode[1] = hipAddressModeWrap;
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
cudaSafeCall( hipBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) );
dim3 threads(128, 1, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, RpT);
size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float);
hipLaunchKernelGGL(( textureness_kernel), dim3(grid), dim3(threads), smem_size, stream, disp, winsz, avgTexturenessThreshold);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
cudaSafeCall( hipUnbindTexture (texForTF) );
}
} // namespace stereobm
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
| 4bba0e8d02ee17d1282747ff0aca9fcef597f18b.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include <limits.h>
namespace cv { namespace cuda { namespace device
{
namespace stereobm
{
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// Stereo BM ////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
#define ROWSperTHREAD 21 // the number of rows a thread will process
#define BLOCK_W 128 // the thread block width (464)
#define N_DISPARITIES 8
#define STEREO_MIND 0 // The minimum d range to check
#define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing
__device__ __forceinline__ int SQ(int a)
{
return a * a;
}
template<int RADIUS>
__device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth)
{
unsigned int cache = 0;
unsigned int cache2 = 0;
if (X < cwidth - RADIUS)
{
for(int i = 1; i <= RADIUS; i++)
cache += col_ssd[i];
}
col_ssd_cache[0] = cache;
__syncthreads();
if (X < cwidth - RADIUS)
{
if (threadIdx.x < BLOCK_W - RADIUS)
cache2 = col_ssd_cache[RADIUS];
else
for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++)
cache2 += col_ssd[i];
}
return col_ssd[0] + cache + cache2;
}
template<int RADIUS>
__device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd, const int X, int cwidth, unsigned int* ssd)
{
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS), X, cwidth);
__syncthreads();
ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS), X, cwidth);
int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7])));
int bestIdx = 0;
for (int i = 0; i < N_DISPARITIES; i++)
{
if (mssd == ssd[i])
bestIdx = i;
}
return make_uint2(mssd, bestIdx);
}
template<int RADIUS>
__device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
unsigned char leftPixel2;
unsigned char rightPixel1[8];
unsigned char rightPixel2[8];
unsigned int diff1, diff2;
leftPixel1 = imageL[idx1];
leftPixel2 = imageL[idx2];
idx1 = idx1 - d;
idx2 = idx2 - d;
rightPixel1[7] = imageR[idx1 - 7];
rightPixel1[0] = imageR[idx1 - 0];
rightPixel1[1] = imageR[idx1 - 1];
rightPixel1[2] = imageR[idx1 - 2];
rightPixel1[3] = imageR[idx1 - 3];
rightPixel1[4] = imageR[idx1 - 4];
rightPixel1[5] = imageR[idx1 - 5];
rightPixel1[6] = imageR[idx1 - 6];
rightPixel2[7] = imageR[idx2 - 7];
rightPixel2[0] = imageR[idx2 - 0];
rightPixel2[1] = imageR[idx2 - 1];
rightPixel2[2] = imageR[idx2 - 2];
rightPixel2[3] = imageR[idx2 - 3];
rightPixel2[4] = imageR[idx2 - 4];
rightPixel2[5] = imageR[idx2 - 5];
rightPixel2[6] = imageR[idx2 - 6];
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
diff1 = leftPixel1 - rightPixel1[0];
diff2 = leftPixel2 - rightPixel2[0];
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[1];
diff2 = leftPixel2 - rightPixel2[1];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[2];
diff2 = leftPixel2 - rightPixel2[2];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[3];
diff2 = leftPixel2 - rightPixel2[3];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[4];
diff2 = leftPixel2 - rightPixel2[4];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[5];
diff2 = leftPixel2 - rightPixel2[5];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[6];
diff2 = leftPixel2 - rightPixel2[6];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
diff1 = leftPixel1 - rightPixel1[7];
diff2 = leftPixel2 - rightPixel2[7];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1);
}
template<int RADIUS>
__device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd)
{
unsigned char leftPixel1;
int idx;
unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0};
for(int i = 0; i < (2 * RADIUS + 1); i++)
{
idx = y_tex * im_pitch + x_tex;
leftPixel1 = imageL[idx];
idx = idx - d;
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]);
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]);
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]);
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]);
diffa[4] += SQ(leftPixel1 - imageR[idx - 4]);
diffa[5] += SQ(leftPixel1 - imageR[idx - 5]);
diffa[6] += SQ(leftPixel1 - imageR[idx - 6]);
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]);
y_tex += 1;
}
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0];
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1];
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2];
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3];
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4];
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5];
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6];
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7];
}
template<int RADIUS>
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp,
int uniquenessRatio, unsigned int* cminSSDImage, size_t cminSSD_step, int cwidth, int cheight)
{
extern __shared__ unsigned int col_ssd_cache[];
uint line_ssds[2 + N_DISPARITIES]; // +2 - tail of previous batch for accurate uniquenessRatio check
uint* batch_ssds = line_ssds + 2;
uint line_ssd_tails[3*ROWSperTHREAD];
uchar uniqueness_approved[ROWSperTHREAD];
uchar local_disparity[ROWSperTHREAD];
volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x;
volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0;
const int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS);
const int Y = (blockIdx.y * ROWSperTHREAD + RADIUS);
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step;
unsigned char* disparImage = disp.data + X + Y * disp.step;
float thresh_scale;
int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS);
int y_tex;
int x_tex = X - RADIUS;
if (x_tex >= cwidth)
return;
for(int i = 0; i < ROWSperTHREAD; i++)
local_disparity[i] = 0;
for(int i = 0; i < 3*ROWSperTHREAD; i++)
{
line_ssd_tails[i] = UINT_MAX;
}
if (uniquenessRatio > 0)
{
batch_ssds[6] = UINT_MAX;
batch_ssds[7] = UINT_MAX;
thresh_scale = (1.0 + uniquenessRatio / 100.0f);
for(int i = 0; i < ROWSperTHREAD; i++)
{
uniqueness_approved[i] = 1;
}
}
for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP)
{
y_tex = Y - RADIUS;
InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd);
if (col_ssd_extra != nullptr)
if (x_tex + BLOCK_W < cwidth)
InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra);
__syncthreads(); //before MinSSD function
if (Y < cheight - RADIUS)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
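            // Illustrative anti-pattern for the note above (sketch only, not part of the algorithm):
            //     if (X < cwidth - RADIUS)
            //         batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
            // Since MinSSD calls __syncthreads internally, threads that skip the call would never
            // reach the barrier and the block would hang, hence the unconditional call above.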
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*0 + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*0 + 1];
line_ssds[1] = line_ssd_tails[3*0 + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[0];
if(batch_opt.x < last_opt)
{
uniqueness_approved[0] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[0] < dtest-1 || local_disparity[0] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[0] = 0;
}
}
if(uniqueness_approved[0])
{
                        // Splitting this into two loops (to avoid the ld vs dtest comparison) makes the
                        // uniqueness check dramatically slower, at least on a GTX 1080.
for (int ld = d-2; ld < d + N_DISPARITIES; ld++)
{
if ((ld < dtest-1 || ld > dtest+1) && (line_ssds[ld-d+2] <= thresh))
{
uniqueness_approved[0] = 0;
break;
}
}
}
line_ssd_tails[3*0 + 1] = batch_ssds[6];
line_ssd_tails[3*0 + 2] = batch_ssds[7];
}
line_ssd_tails[3*0 + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[0] = (unsigned char)(d + batch_opt.y);
}
}
}
for(int row = 1; row < end_row; row++)
{
int idx1 = y_tex * img_step + x_tex;
int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex;
__syncthreads();
StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd);
if (col_ssd_extra)
if (x_tex + BLOCK_W < cwidth)
StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra);
y_tex += 1;
__syncthreads();
if (row < cheight - RADIUS - Y)
{
uint2 batch_opt = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd, X, cwidth, batch_ssds);
// For threads that do not satisfy the if condition below("X < cwidth - RADIUS"), previously
// computed "batch_opt" value, which is the result of "MinSSD" function call, is not used at all.
//
// However, since the "MinSSD" function has "__syncthreads" call in its body, those threads
// must also call "MinSSD" to avoid deadlock. (#13850)
//
// From CUDA 9, using "__syncwarp" with proper mask value instead of using "__syncthreads"
// could be an option, but the shared memory access pattern does not allow this option,
// resulting in race condition. (Checked via "cuda-memcheck --tool racecheck")
if (X < cwidth - RADIUS)
{
unsigned int last_opt = line_ssd_tails[3*row + 0];
unsigned int opt = ::min(last_opt, batch_opt.x);
if (uniquenessRatio > 0)
{
line_ssds[0] = line_ssd_tails[3*row + 1];
line_ssds[1] = line_ssd_tails[3*row + 2];
float thresh = thresh_scale * opt;
int dtest = local_disparity[row];
if(batch_opt.x < last_opt)
{
uniqueness_approved[row] = 1;
dtest = d + batch_opt.y;
if ((local_disparity[row] < dtest-1 || local_disparity[row] > dtest+1) && (last_opt <= thresh))
{
uniqueness_approved[row] = 0;
}
}
if(uniqueness_approved[row])
{
for (int ld = 0; ld < N_DISPARITIES + 2; ld++)
{
if (((d+ld-2 < dtest-1) || (d+ld-2 > dtest+1)) && (line_ssds[ld] <= thresh))
{
uniqueness_approved[row] = 0;
break;
}
}
}
line_ssd_tails[3*row + 1] = batch_ssds[6];
line_ssd_tails[3*row + 2] = batch_ssds[7];
}
line_ssd_tails[3*row + 0] = opt;
if (batch_opt.x < last_opt)
{
local_disparity[row] = (unsigned char)(d + batch_opt.y);
}
}
}
} // for row loop
__syncthreads(); // before initializing shared memory at the beginning of next loop
} // for d loop
for (int row = 0; row < end_row; row++)
{
minSSDImage[row * cminSSD_step] = line_ssd_tails[3*row + 0];
}
if (uniquenessRatio > 0)
{
for (int row = 0; row < end_row; row++)
{
            // drop the disparity for pixels where the uniqueness requirement was not satisfied (the flag zeroes the value)
disparImage[disp.step * row] = local_disparity[row] * uniqueness_approved[row];
}
}
else
{
for (int row = 0; row < end_row; row++)
{
disparImage[disp.step * row] = local_disparity[row];
}
}
}
template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, cudaStream_t & stream)
{
dim3 grid(1,1,1);
dim3 threads(BLOCK_W, 1, 1);
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W);
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD);
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS)
size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int);
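        // Worked example of the shared-memory budget (assuming BLOCK_W == 128 and N_DISPARITIES == 8,
        // as defined earlier in this file): for RADIUS == 9 this is
        // (128 + 8 * (128 + 18)) * sizeof(unsigned int) = 1296 * 4 = 5184 bytes per block,
        // i.e. one BLOCK_W-wide reduction buffer followed by eight COL_SSD_SIZE-wide column-sum rows.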
stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.data, right.data, left.step, disp, maxdisp, uniquenessRatio,
missd_buffer, minssd_step, cwidth, cheight);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
};
typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp,
int maxdisp, int uniquenessRatio, unsigned int* missd_buffer,
size_t minssd_step, int cwidth, int cheight, cudaStream_t & stream);
const static kernel_caller_t callers[] =
{
0,
kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>,
kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>,
kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>,
kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25>
//0,0,0, 0,0,0, 0,0,kernel_caller<9>
};
const int calles_num = sizeof(callers)/sizeof(callers[0]);
void stereoBM_CUDA(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp,
int winsz, int uniquenessRatio, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream)
{
int winsz2 = winsz >> 1;
if (winsz2 == 0 || winsz2 >= calles_num)
CV_Error(cv::Error::StsBadArg, "Unsupported window size");
cudaSafeCall( cudaMemset2DAsync(disp.data, disp.step, 0, disp.cols, disp.rows, stream) );
cudaSafeCall( cudaMemset2DAsync(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows, stream) );
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize();
callers[winsz2](left, right, disp, maxdisp, uniquenessRatio, minSSD_buf.data, minssd_step, left.cols, left.rows, stream);
}
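    // For reference, this entry point is normally reached through the public OpenCV CUDA API,
    // roughly as sketched below (host side; the parameter values are placeholders, not requirements
    // of this file):
    //     cv::Ptr<cv::cuda::StereoBM> bm = cv::cuda::createStereoBM(/*numDisparities=*/64, /*blockSize=*/19);
    //     bm->compute(d_left, d_right, d_disp); // d_left/d_right/d_disp are cv::cuda::GpuMat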
__device__ inline int clamp(int x, int a, int b)
{
return ::max(a, ::min(b, x));
}
//////////////////////////////////////////////////////////////////////////////////////////////////
    /////////////////////////////////////// Sobel Prefilter //////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_xsobel(PtrStepSzb input, PtrStepSzb output, int prefilterCap)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < output.cols && y < output.rows)
{
int conv = input.ptr(::max(0,y-1))[::max(0,x-1)] * (-1) + input.ptr(::max(0, y-1))[::min(x+1, input.cols-1)] * (1) +
input.ptr(y )[::max(0,x-1)] * (-2) + input.ptr(y )[::min(x+1, input.cols-1)] * (2) +
input.ptr(::min(y+1, input.rows-1))[::max(0,x-1)] * (-1) + input.ptr(::min(y+1, input.rows-1))[::min(x+1,input.cols-1)] * (1);
conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255);
output.ptr(y)[x] = conv & 0xFF;
}
}
void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, cudaStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
prefilter_kernel_xsobel<<<grid, threads, 0, stream>>>(input, output, prefilterCap);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
    /////////////////////////////////////// Norm Prefilter ///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void prefilter_kernel_norm(PtrStepSzb input, PtrStepSzb output, int prefilterCap, int scale_g, int scale_s, int winsize)
{
// prefilterCap in range 1..63, checked in StereoBMImpl::compute
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int cols = input.cols;
int rows = input.rows;
int WSZ2 = winsize / 2;
if(x < cols && y < rows)
{
            // cross-shaped smoothing kernel: top, left, 4x center, right, bottom
            int cov1 = input.ptr(::max(y-1, 0))[x] * 1 +
                       input.ptr(y)[::max(x-1, 0)] * 1 + input.ptr(y )[x] * 4 + input.ptr(y)[::min(x+1, cols-1)] * 1 +
                       input.ptr(::min(y+1, rows-1))[x] * 1;
int cov2 = 0;
for(int i = -WSZ2; i < WSZ2+1; i++)
for(int j = -WSZ2; j < WSZ2+1; j++)
cov2 += input.ptr(clamp(y+i, 0, rows-1))[clamp(x+j, 0, cols-1)];
int res = (cov1*scale_g - cov2*scale_s)>>10;
res = clamp(res, -prefilterCap, prefilterCap) + prefilterCap;
output.ptr(y)[x] = res;
}
}
void prefilter_norm(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, int winsize, cudaStream_t & stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, threads.y);
int scale_g = winsize*winsize/8, scale_s = (1024 + scale_g)/(scale_g*2);
scale_g *= scale_s;
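        // Worked example of the fixed-point scaling above: for winsize == 9, scale_g = 81/8 = 10,
        // scale_s = (1024 + 10) / 20 = 51, and scale_g becomes 510. The cov1 weights sum to 8 and the
        // window holds 81 pixels, so cov1*510 ~ 4080*local_avg and cov2*51 ~ 4131*window_avg; after
        // the >> 10 in the kernel the response is roughly 4 * (local average - window average).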
prefilter_kernel_norm<<<grid, threads, 0, stream>>>(input, output, prefilterCap, scale_g, scale_s, winsize);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Textureness filtering ////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF;
__device__ __forceinline__ float sobel(int x, int y)
{
float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) +
tex2D(texForTF, x - 1, y ) * (-2) + tex2D(texForTF, x + 1, y ) * (2) +
tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1);
return fabs(conv);
}
__device__ float CalcSums(float *cols, float *cols_cache, int winsz)
{
float cache = 0;
float cache2 = 0;
int winsz2 = winsz/2;
for(int i = 1; i <= winsz2; i++)
cache += cols[i];
cols_cache[0] = cache;
__syncthreads();
if (threadIdx.x < blockDim.x - winsz2)
cache2 = cols_cache[winsz2];
else
for(int i = winsz2 + 1; i < winsz; i++)
cache2 += cols[i];
return cols[0] + cache + cache2;
}
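    // CalcSums (above) forms the horizontal sum of winsz consecutive column sums cooperatively:
    // each thread accumulates its right-hand half-window (cols[1..winsz2]) into its cols_cache slot,
    // and after the barrier most threads reuse the partial sum published by the thread winsz2 slots
    // to their right (cols_cache[winsz2]) rather than re-reading those columns; threads near the
    // right edge of the block fall back to summing the remaining columns directly.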
    #define RpT (2 * ROWSperTHREAD) // determined experimentally
__global__ void textureness_kernel(PtrStepSzb disp, int winsz, float threshold)
{
int winsz2 = winsz/2;
int n_dirty_pixels = (winsz2) * 2;
extern __shared__ float cols_cache[];
float *cols = cols_cache + blockDim.x + threadIdx.x;
float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int beg_row = blockIdx.y * RpT;
int end_row = ::min(beg_row + RpT, disp.rows);
if (x < disp.cols)
{
int y = beg_row;
float sum = 0;
float sum_extra = 0;
for(int i = y - winsz2; i <= y + winsz2; ++i)
{
sum += sobel(x - winsz2, i);
if (cols_extra)
sum_extra += sobel(x + blockDim.x - winsz2, i);
}
*cols = sum;
if (cols_extra)
*cols_extra = sum_extra;
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
for(int y = beg_row + 1; y < end_row; ++y)
{
sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2);
*cols = sum;
if (cols_extra)
{
sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2);
*cols_extra = sum_extra;
}
__syncthreads();
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255;
if (sum_win < threshold)
disp.data[y * disp.step + x] = 0;
__syncthreads();
}
}
}
void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, cudaStream_t & stream)
{
avgTexturenessThreshold *= winsz * winsz;
texForTF.filterMode = cudaFilterModeLinear;
texForTF.addressMode[0] = cudaAddressModeWrap;
texForTF.addressMode[1] = cudaAddressModeWrap;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
cudaSafeCall( cudaBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) );
dim3 threads(128, 1, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(input.cols, threads.x);
grid.y = divUp(input.rows, RpT);
size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float);
textureness_kernel<<<grid, threads, smem_size, stream>>>(disp, winsz, avgTexturenessThreshold);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
cudaSafeCall( cudaUnbindTexture (texForTF) );
}
} // namespace stereobm
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
|
1c07f321e7dc39e0ec3dfca24d5dc44d9e6886e3.hip | // !!! This is a file automatically generated by hipify!!!
#define USING_OPENCV
#include <iostream>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <fstream>
#include <ctime>
#include <string>
using namespace std;
void handleError(hipError_t error, const char* text) {
if (error != hipSuccess) {
cout << "ERROR " << text << endl;
exit(1);
}
}
class getInput{
unsigned int M; // rows
unsigned int N; // columns
float* d; // data
public:
getInput(){
M = 0;
N = 0;
d = NULL;
}
getInput(const char *filename){
ifstream infile; // read the input matrix file
infile.open(filename, std::ios::in | std::ios::binary); // open as binary input file
infile.read((char*)&M, sizeof(unsigned int)); // read the number of rows
infile.read((char*)&N, sizeof(unsigned int)); // read the number of columns
d = (float*) malloc (M * N * sizeof(float)); // allocating memory for the matrix
infile.read( (char*)d , sizeof(float) * M * N); // read and copy the data to the memory
infile.close();
}
__host__ __device__ unsigned int rows(){
return M; //#rows
}
__host__ __device__ unsigned int cols(){
return N; //#cols
}
__host__ __device__ float * data(){
return d; //matrix data
}
};
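// Illustrative helper (not used by this program; the name is an addition for documentation only):
// it writes a matrix in the binary layout getInput expects -- the row and column counts as
// unsigned int, followed by rows*cols floats in row-major order.
static void writeInputMatrix(const char* filename, unsigned int rows, unsigned int cols, const float* data){
    ofstream outfile(filename, std::ios::out | std::ios::binary); // binary output, mirroring getInput's read path
    outfile.write((const char*)&rows, sizeof(unsigned int)); // number of rows
    outfile.write((const char*)&cols, sizeof(unsigned int)); // number of columns
    outfile.write((const char*)data, sizeof(float) * rows * cols); // matrix payload
    outfile.close();
}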
__global__ void gpu_mul(getInput A,getInput B, float* d_A, float* d_B, float* d_C) {
__shared__ float sA[32][32]; // Tile size of 32x32
__shared__ float sB[32][32];
int Row = blockDim.y*blockIdx.y + threadIdx.y;
int Col = blockDim.x*blockIdx.x + threadIdx.x;
float temp = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((A.cols() - 1)/ 32) + 1); k++)
{
if ( (Row < A.rows()) && (threadIdx.x + (k*32)) < A.cols())
{
sA[threadIdx.y][threadIdx.x] = d_A[(Row * A.cols()) + threadIdx.x + (k*32)];
}
else
{
sA[threadIdx.y][threadIdx.x] = 0.0;
}
if ( Col < B.cols() && (threadIdx.y + k*32) < B.rows())
{
sB[threadIdx.y][threadIdx.x] = d_B[(threadIdx.y + k*32) * B.cols() + Col];
}
else
{
sB[threadIdx.y][threadIdx.x] = 0.0;
}
        __syncthreads();
        for (int j = 0; j < 32; ++j)
        {
            temp += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        __syncthreads(); // ensure the tiles are fully consumed before the next iteration overwrites them
    }
if (Row < A.rows() && Col < B.cols())
{
d_C[Row*B.cols() + Col] = temp;
}
}
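// gpu_mul (above) computes C = A * B with the classic shared-memory tiling scheme: each 32x32
// thread block produces one 32x32 tile of C, looping over the tile pairs along the shared
// dimension. Every iteration stages one tile of A and one tile of B in shared memory (zero-padding
// out-of-range elements), synchronizes, and accumulates the partial dot products in a register.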
int main(int argc, char *argv[]) {
hipEvent_t start, stop; //declaring start & stop time events
hipEventCreate(&start);
hipEventCreate(&stop);
getInput A(argv[1]); // reading input arrays A & B from Command Line
getInput B(argv[2]);
float *h_C = (float *)malloc(A.rows() * B.cols() * sizeof(float)); // Allocate host memory for output matrix C
    if (A.cols() != B.rows()) { // #cols in A must equal #rows in B for matrix multiplication
        cerr << "Invalid Matrix Multiplication" << endl;
        exit(1);
    }
unsigned int M = A.rows();
unsigned int N = B.cols();
float* h_A = A.data();
float* h_B = B.data();
    float* C = (float*) malloc (M * N * sizeof(float));
    for(unsigned int i = 0 ; i < M ; i++){ // CPU reference multiply (row-major layout) used to validate the GPU result
        for(unsigned int j = 0 ; j < N ; j++){
            float sum = 0.0f;
            for(unsigned int k = 0 ; k < A.cols() ; k++){
                sum += h_A[i * A.cols() + k] * h_B[k * N + j];
            }
            C[i * N + j] = sum;
            cout << C[i * N + j] << " ";
        }
        cout << endl;
    }
hipError_t error;
float* d_A;
size_t bytesA = A.rows() * A.cols() * sizeof(float); // allocating GPU memory for input matrix A
error = hipMalloc((void**)&d_A, bytesA);
handleError(error, "allocating GPU memory for input matrix A");
float* d_B;
size_t bytesB = B.rows() * B.cols() * sizeof(float); // allocating GPU memory for input matrix B
error = hipMalloc((void**)&d_B, bytesB);
handleError(error, "allocating GPU memory for matrix B");
float* d_C;
size_t bytesC = A.rows() * B.cols() * sizeof(float); // allocating GPU memory for product C = A*B
error = hipMalloc((void**)&d_C, bytesC);
handleError(error, "allocating memory on the device for matrix C");
error = hipMemcpy(d_A, A.data(), bytesA, hipMemcpyHostToDevice);
handleError(error, "copying matrix A from host to device"); // copying matrix A from host to device
error = hipMemcpy(d_B, B.data(), bytesB, hipMemcpyHostToDevice);
handleError(error, "copying matrix B from host to device"); // copying matrix B from host to device
    dim3 gridDim((B.cols() + 31)/32, (A.rows() + 31)/32, 1); // grid.x spans the columns of C, grid.y the rows
dim3 blockDim(32,32,1); // two dimensional grid & block configuration
hipEventRecord(start); //start recording time
gpu_mul << <gridDim, blockDim>> >(A, B, d_A, d_B, d_C);
hipDeviceSynchronize();
hipEventRecord(stop); //stop recording time
hipMemcpy(h_C,d_C,A.rows() * B.cols() * sizeof(float),hipMemcpyDeviceToHost); // Copy (and print) the result on host memory
hipEventSynchronize(stop);
float milliseconds; //time in milliseconds
hipEventElapsedTime(&milliseconds, start, stop);
ofstream outfile;
outfile.open(argv[3], std::ios::out);
if(outfile.is_open()){
        for (unsigned int p = 1; p < A.rows()+1; p++){ //printing result h_C on host to txt file
            for (unsigned int q = 1; q < B.cols()+1; q++){
                outfile << h_C[(p-1) * B.cols() + (q-1)]; // h_C is A.rows() x B.cols(), stored row-major
outfile << "\t";
}
outfile << endl;
}
outfile << "Total GPU time using shared memory implementation: " << milliseconds;
}
else{
cout << "Error in opening the file" << endl;}
const char* file = "mse.txt";
ofstream outfile1;
outfile1.open(file, std::ios::out);
if (outfile1.is_open()){
        for (unsigned int i = 0; i < A.rows()*B.cols(); i++)
        {
            if (C[i] != h_C[i] ) // compare the CPU reference with the result copied back from the GPU
            {
                outfile1 << "Mismatch at Row = " << i / B.cols() << " Col = " << i % B.cols() << " cpu[] = " << C[i] << " gpu[] = " << h_C[i] << "\n";
outfile1 << endl;
break;
}
}
}
else{
cout << "Error in opening the file" << endl;
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
| 1c07f321e7dc39e0ec3dfca24d5dc44d9e6886e3.cu | #define USING_OPENCV
#include <iostream>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <fstream>
#include <ctime>
#include <string>
using namespace std;
void handleError(cudaError_t error, const char* text) {
if (error != cudaSuccess) {
cout << "ERROR " << text << endl;
exit(1);
}
}
class getInput{
unsigned int M; // rows
unsigned int N; // columns
float* d; // data
public:
getInput(){
M = 0;
N = 0;
d = NULL;
}
getInput(const char *filename){
ifstream infile; // read the input matrix file
infile.open(filename, std::ios::in | std::ios::binary); // open as binary input file
infile.read((char*)&M, sizeof(unsigned int)); // read the number of rows
infile.read((char*)&N, sizeof(unsigned int)); // read the number of columns
d = (float*) malloc (M * N * sizeof(float)); // allocating memory for the matrix
infile.read( (char*)d , sizeof(float) * M * N); // read and copy the data to the memory
infile.close();
}
__host__ __device__ unsigned int rows(){
return M; //#rows
}
__host__ __device__ unsigned int cols(){
return N; //#cols
}
__host__ __device__ float * data(){
return d; //matrix data
}
};
__global__ void gpu_mul(getInput A,getInput B, float* d_A, float* d_B, float* d_C) {
__shared__ float sA[32][32]; // Tile size of 32x32
__shared__ float sB[32][32];
int Row = blockDim.y*blockIdx.y + threadIdx.y;
int Col = blockDim.x*blockIdx.x + threadIdx.x;
float temp = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((A.cols() - 1)/ 32) + 1); k++)
{
if ( (Row < A.rows()) && (threadIdx.x + (k*32)) < A.cols())
{
sA[threadIdx.y][threadIdx.x] = d_A[(Row * A.cols()) + threadIdx.x + (k*32)];
}
else
{
sA[threadIdx.y][threadIdx.x] = 0.0;
}
if ( Col < B.cols() && (threadIdx.y + k*32) < B.rows())
{
sB[threadIdx.y][threadIdx.x] = d_B[(threadIdx.y + k*32) * B.cols() + Col];
}
else
{
sB[threadIdx.y][threadIdx.x] = 0.0;
}
        __syncthreads();
        for (int j = 0; j < 32; ++j)
        {
            temp += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        __syncthreads(); // ensure the tiles are fully consumed before the next iteration overwrites them
    }
if (Row < A.rows() && Col < B.cols())
{
d_C[Row*B.cols() + Col] = temp;
}
}
int main(int argc, char *argv[]) {
cudaEvent_t start, stop; //declaring start & stop time events
cudaEventCreate(&start);
cudaEventCreate(&stop);
getInput A(argv[1]); // reading input arrays A & B from Command Line
getInput B(argv[2]);
float *h_C = (float *)malloc(A.rows() * B.cols() * sizeof(float)); // Allocate host memory for output matrix C
    if (A.cols() != B.rows()) { // #cols in A must equal #rows in B for matrix multiplication
        cerr << "Invalid Matrix Multiplication" << endl;
        exit(1);
    }
unsigned int M = A.rows();
unsigned int N = B.cols();
float* h_A = A.data();
float* h_B = B.data();
    float* C = (float*) malloc (M * N * sizeof(float));
    for(unsigned int i = 0 ; i < M ; i++){ // CPU reference multiply (row-major layout) used to validate the GPU result
        for(unsigned int j = 0 ; j < N ; j++){
            float sum = 0.0f;
            for(unsigned int k = 0 ; k < A.cols() ; k++){
                sum += h_A[i * A.cols() + k] * h_B[k * N + j];
            }
            C[i * N + j] = sum;
            cout << C[i * N + j] << " ";
        }
        cout << endl;
    }
cudaError_t error;
float* d_A;
size_t bytesA = A.rows() * A.cols() * sizeof(float); // allocating GPU memory for input matrix A
error = cudaMalloc((void**)&d_A, bytesA);
handleError(error, "allocating GPU memory for input matrix A");
float* d_B;
size_t bytesB = B.rows() * B.cols() * sizeof(float); // allocating GPU memory for input matrix B
error = cudaMalloc((void**)&d_B, bytesB);
handleError(error, "allocating GPU memory for matrix B");
float* d_C;
size_t bytesC = A.rows() * B.cols() * sizeof(float); // allocating GPU memory for product C = A*B
error = cudaMalloc((void**)&d_C, bytesC);
handleError(error, "allocating memory on the device for matrix C");
error = cudaMemcpy(d_A, A.data(), bytesA, cudaMemcpyHostToDevice);
handleError(error, "copying matrix A from host to device"); // copying matrix A from host to device
error = cudaMemcpy(d_B, B.data(), bytesB, cudaMemcpyHostToDevice);
handleError(error, "copying matrix B from host to device"); // copying matrix B from host to device
    dim3 gridDim((B.cols() + 31)/32, (A.rows() + 31)/32, 1); // grid.x spans the columns of C, grid.y the rows
dim3 blockDim(32,32,1); // two dimensional grid & block configuration
cudaEventRecord(start); //start recording time
gpu_mul << <gridDim, blockDim>> >(A, B, d_A, d_B, d_C);
    cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
cudaEventRecord(stop); //stop recording time
cudaMemcpy(h_C,d_C,A.rows() * B.cols() * sizeof(float),cudaMemcpyDeviceToHost); // Copy (and print) the result on host memory
cudaEventSynchronize(stop);
float milliseconds; //time in milliseconds
cudaEventElapsedTime(&milliseconds, start, stop);
ofstream outfile;
outfile.open(argv[3], std::ios::out);
if(outfile.is_open()){
        for (unsigned int p = 1; p < A.rows()+1; p++){ //printing result h_C on host to txt file
            for (unsigned int q = 1; q < B.cols()+1; q++){
                outfile << h_C[(p-1) * B.cols() + (q-1)]; // h_C is A.rows() x B.cols(), stored row-major
outfile << "\t";
}
outfile << endl;
}
outfile << "Total GPU time using shared memory implementation: " << milliseconds;
}
else{
cout << "Error in opening the file" << endl;}
const char* file = "mse.txt";
ofstream outfile1;
outfile1.open(file, std::ios::out);
if (outfile1.is_open()){
        for (unsigned int i = 0; i < A.rows()*B.cols(); i++)
        {
            if (C[i] != h_C[i] ) // compare the CPU reference with the result copied back from the GPU
            {
                outfile1 << "Mismatch at Row = " << i / B.cols() << " Col = " << i % B.cols() << " cpu[] = " << C[i] << " gpu[] = " << h_C[i] << "\n";
outfile1 << endl;
break;
}
}
}
else{
cout << "Error in opening the file" << endl;
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
ed10579bd4a824e473663e05d9a747b706b280d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_4_right;
int xdim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_4_right;
int ydim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_4_right;
int xdim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_4_right;
int ydim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_4_right * \
ydim0_update_halo_kernel2_zvel_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_4_right * \
ydim1_update_halo_kernel2_zvel_plus_4_right * (z))
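// The OPS_ACC macros flatten a 3D (x, y, z) offset into a 1D index over the padded dataset
// extents: index = x + xdim * y + xdim * ydim * z, so x is the fastest-varying dimension and
// each argument carries its own xdim/ydim pair copied to constant memory by the host stub below.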
// user function
__device__
inline void
update_halo_kernel2_zvel_plus_4_right(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-4, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_4_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right *
ydim0_update_halo_kernel2_zvel_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right *
ydim1_update_halo_kernel2_zvel_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_4_right(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 98))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(98, "update_halo_kernel2_zvel_plus_4_right");
OPS_kernels[98].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_right_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_4_right_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_4_right_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_4_right_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[98].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_right), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[98].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[98].mpi_time += t2 - t1;
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| ed10579bd4a824e473663e05d9a747b706b280d4.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_4_right;
int xdim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_4_right;
int ydim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_4_right;
int xdim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_4_right;
int ydim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_4_right * \
ydim0_update_halo_kernel2_zvel_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_4_right * \
ydim1_update_halo_kernel2_zvel_plus_4_right * (z))
// user function
__device__
inline void
update_halo_kernel2_zvel_plus_4_right(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-4, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_4_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right *
ydim0_update_halo_kernel2_zvel_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right *
ydim1_update_halo_kernel2_zvel_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_4_right(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 98))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(98, "update_halo_kernel2_zvel_plus_4_right");
OPS_kernels[98].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_right_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_4_right_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_4_right_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_4_right_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[98].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_zvel_plus_4_right<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[98].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[98].mpi_time += t2 - t1;
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
0defcae0f31606ca7adf2df59deb10407a3e2200.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021,22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
#define CUB_NS_QUALIFIER ::kaolin::cub
#include <stdio.h>
#include <ATen/ATen.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#ifdef EXPERIMENTAL
#include <ATen/native/hip/KernelUtils.cuh>
#else
#include <THH/THHAtomics.cuh>
#endif
// TODO(ttakikawa): newer versions of PyTorch will migrate to <ATen/cuda/Atomics.cuh>.
// How do we manage these dependencies?
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include "../../spc_math.h"
#include "../../spc_utils.cuh"
#include "spc_render_utils.cuh"
namespace kaolin {
using namespace at::indexing;
#define RT_NUM_THREADS 1024
////////////////////////////////////////////////////////////////////////////////////////////////
/// Constants
////////////////////////////////////////////////////////////////////////////////////////////////
__constant__ uint8_t VOXEL_ORDER[8][8] = {
{ 0, 1, 2, 4, 3, 5, 6, 7 },
{ 1, 0, 3, 5, 2, 4, 7, 6 },
{ 2, 0, 3, 6, 1, 4, 7, 5 },
{ 3, 1, 2, 7, 0, 5, 6, 4 },
{ 4, 0, 5, 6, 1, 2, 7, 3 },
{ 5, 1, 4, 7, 0, 3, 6, 2 },
{ 6, 2, 4, 7, 0, 3, 5, 1 },
{ 7, 3, 5, 6, 1, 2, 4, 0 }
};
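// VOXEL_ORDER[code] lists the eight children of a voxel in the order subdivide_cuda_kernel emits
// them: `code` encodes the octant of the parent voxel that contains the ray origin (bit 2 = x,
// bit 1 = y, bit 0 = z), and children are then visited in increasing bit-distance from that
// nearest octant, which gives an approximate front-to-back traversal order along the ray.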
////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernels
////////////////////////////////////////////////////////////////////////////////////////////////
// This function will initialize the nuggets array with each ray pointing to the octree root
__global__ void
init_nuggets_cuda_kernel(
const uint num,
uint2* nuggets) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
nuggets[tidx].x = tidx; // ray idx
nuggets[tidx].y = 0; // point idx
}
}
// This function will iterate over the nuggets (ray intersection proposals) and determine if they
// result in an intersection. If they do, the info tensor is populated with the # of child nodes
// as determined by the input octree.
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level,
const uint32_t not_done) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
float depth = ray_aabb(o, d, ray_inv, sgn, vc, r);
if (not_done){
if (depth != 0.0)
info[tidx] = __popc(octree[pidx]);
else
info[tidx] = 0;
}
else { // at bottom
if (depth > 0.0)
info[tidx] = 1;
else
info[tidx] = 0;
}
}
}
// Overloaded version of function above that returns depth of voxel/ ray entry points
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
float* depth,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
depth[tidx] = ray_aabb(o, d, ray_inv, sgn, vc, r);
// Perform AABB check
if (depth[tidx] > 0.0){
info[tidx] = 1; // mark to keep
} else {
info[tidx] = 0;
}
}
}
// Overloaded version of function above that returns depth of voxel/ ray entry and exit points
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
float2* __restrict__ depth,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
depth[tidx] = ray_aabb_with_exit(o, d, ray_inv, vc, r);
// Perform AABB check
if (depth[tidx].x > 0.0 && depth[tidx].y > 0.0){
info[tidx] = 1; // mark to keep
} else {
info[tidx] = 0;
}
}
}
// This function will iterate over the nugget array, and for each nuggets stores the child indices of the
// nuggets (as defined by the octree tensor)
__global__ void
subdivide_cuda_kernel(
const uint num,
const uint2* __restrict__ nuggets_in,
uint2* __restrict__ nuggets_out,
const float3* __restrict__ ray_o,
const point_data* __restrict__ points,
const uint8_t* __restrict__ octree,
const uint* __restrict__ exclusive_sum,
const uint* __restrict__ info,
const uint* __restrict__ prefix_sum,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
uint ridx = nuggets_in[tidx].x;
int pidx = nuggets_in[tidx].y;
point_data p = points[pidx];
uint base_idx = prefix_sum[tidx];
uint8_t o = octree[pidx];
uint s = exclusive_sum[pidx];
float scale = 1.0 / ((float)(0x1 << level));
float3 org = ray_o[ridx];
float x = (0.5f * org.x + 0.5f) - scale*((float)p.x + 0.5);
float y = (0.5f * org.y + 0.5f) - scale*((float)p.y + 0.5);
float z = (0.5f * org.z + 0.5f) - scale*((float)p.z + 0.5);
uint code = 0;
if (x > 0) code = 4;
if (y > 0) code += 2;
if (z > 0) code += 1;
for (uint i = 0; i < 8; i++) {
uint j = VOXEL_ORDER[code][i];
if (o&(0x1 << j)) {
uint cnt = __popc(o&((0x2 << j) - 1)); // count set bits up to child - inclusive sum
nuggets_out[base_idx].y = s + cnt;
nuggets_out[base_idx++].x = ridx;
}
}
}
}
template<typename scalar_t>
__global__ void
mark_pack_boundaries_cuda_kernel(
const int64_t num,
const scalar_t* __restrict__ pack_ids,
uint* __restrict__ boundaries) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
if (tidx == 0) {
boundaries[tidx] = 1;
} else {
boundaries[tidx] = pack_ids[tidx - 1] == pack_ids[tidx] ? 0 : 1;
}
}
}
// This function will take a buffer and compact it, keeping only the entries whose info flag is set
template<typename scalar_t>
__global__ void
compactify_cuda_kernel(
const uint num,
const scalar_t* __restrict__ buffer_in,
scalar_t* __restrict__ buffer_out,
const uint* __restrict__ info,
const uint* __restrict__ prefix_sum) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
buffer_out[prefix_sum[tidx]] = buffer_in[tidx];
}
}
template<typename scalar_t>
__global__ void
diff_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int64_t* __restrict__ pack_indices) {
int64_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int64_t upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
for (int64_t i=pack_indices[tidx]; i<upper_bound-1; ++i) {
for (int64_t j=0; j<feat_dim; ++j) {
feats_out[i * feat_dim + j] = feats_in[(i+1) * feat_dim + j] - feats_in[i * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
sum_reduce_cuda_kernel(
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int32_t* __restrict__ inclusive_sum) {
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_feats) {
for (int i=0; i<feat_dim; ++i) {
int idx = (inclusive_sum[tidx]-1) * feat_dim + i;
# ifdef EXPERIMENTAL
int numel = num_feats*feat_dim;
at::native::fastAtomicAdd(feats_out, idx, numel, feats_in[tidx * feat_dim + i], true);
# else
gpuAtomicAdd(feats_out + idx, feats_in[tidx * feat_dim + i]);
# endif
}
}
}
// This kernel is the same as sum_reduce but avoids atomic add by packing the ops.
// It however will cause thread divergence.
template<typename scalar_t>
__global__ void
packed_sum_reduce_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int64_t* __restrict__ pack_indices) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int64_t upper_bound = (tidx == num_packs-1) ? num_feats*feat_dim : pack_indices[tidx+1];
for (int i=pack_indices[tidx]; i<upper_bound-1; ++i) {
for (int j=0; j<feat_dim; ++j) {
feats_out[i * feat_dim + j] += feats_in[i * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumprod_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[begin * feat_dim + j] = feats_in[begin * feat_dim + j];
}
}
for (int i=begin+1; i<upper_bound; ++i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i-offset) * feat_dim + j] * feats_out[(i-1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumprod_reverse_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[(upper_bound-1) * feat_dim + j] = feats_in[(upper_bound-1) * feat_dim + j];
}
}
for (int i=upper_bound-2; i>=begin; --i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i+offset) * feat_dim + j] * feats_out[(i+1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumsum_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[begin * feat_dim + j] = feats_in[begin * feat_dim + j];
}
}
for (int i=begin+1; i<upper_bound; ++i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i-offset) * feat_dim + j] + feats_out[(i-1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumsum_reverse_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[(upper_bound-1) * feat_dim + j] = feats_in[(upper_bound-1) * feat_dim + j];
}
}
for (int i=upper_bound-2; i>=begin; --i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i+offset) * feat_dim + j] + feats_out[(i+1) * feat_dim + j];
}
}
}
}
std::vector<at::Tensor> raytrace_cuda_impl(
at::Tensor octree,
at::Tensor points,
at::Tensor pyramid,
at::Tensor exclusive_sum,
at::Tensor ray_o,
at::Tensor ray_d,
uint32_t max_level,
uint32_t target_level,
bool return_depth,
bool with_exit) {
uint num = ray_o.size(0);
uint8_t* octree_ptr = octree.data_ptr<uint8_t>();
point_data* points_ptr = reinterpret_cast<point_data*>(points.data_ptr<short>());
uint* exclusive_sum_ptr = reinterpret_cast<uint*>(exclusive_sum.data_ptr<int>());
float3* ray_o_ptr = reinterpret_cast<float3*>(ray_o.data_ptr<float>());
float3* ray_d_ptr = reinterpret_cast<float3*>(ray_d.data_ptr<float>());
// allocate local GPU storage
at::Tensor nuggets0 = at::empty({num, 2}, octree.options().dtype(at::kInt));
at::Tensor nuggets1;
uint depth_dim = with_exit ? 2 : 1;
at::Tensor depths0;
at::Tensor depths1;
// Generate proposals (first proposal is root node)
hipLaunchKernelGGL(( init_nuggets_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()));
uint cnt, buffer = 0;
for (uint32_t l = 0; l <= target_level; l++) {
at::Tensor info = at::empty({num+1}, octree.options().dtype(at::kInt));
uint* info_ptr = reinterpret_cast<uint*>(info.data_ptr<int>());
// Do the proposals hit?
if (l == target_level && return_depth) {
depths0 = at::empty({num, depth_dim}, octree.options().dtype(at::kFloat));
if (with_exit) {
hipLaunchKernelGGL(( decide_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
reinterpret_cast<float2*>(l == target_level ? depths0.data_ptr<float>() : 0),
info_ptr, octree_ptr, l);
} else {
hipLaunchKernelGGL(( decide_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
l == target_level ? depths0.data_ptr<float>() : 0, info_ptr, octree_ptr, l);
}
}
else {
hipLaunchKernelGGL(( decide_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
info_ptr, octree_ptr, l, target_level - l);
}
at::Tensor prefix_sum = at::empty({num+1}, octree.options().dtype(at::kInt));
uint* prefix_sum_ptr = reinterpret_cast<uint*>(prefix_sum.data_ptr<int>());
// set first element to zero
CubDebugExit(hipMemcpy(prefix_sum_ptr, &buffer, sizeof(uint), hipMemcpyHostToDevice));
// set up memory for DeviceScan calls
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(
temp_storage_ptr, info_ptr, prefix_sum_ptr, num+1);
at::Tensor temp_storage = at::empty({(int64_t)temp_storage_bytes}, octree.options());
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
CubDebugExit(hipcub::DeviceScan::InclusiveSum(
temp_storage_ptr, temp_storage_bytes, info_ptr,
prefix_sum_ptr + 1, num)); //start sum on second element
hipMemcpy(&cnt, prefix_sum_ptr + num, sizeof(uint), hipMemcpyDeviceToHost);
// allocate local GPU storage
nuggets1 = at::empty({cnt, 2}, octree.options().dtype(at::kInt));
// miss everything
if (cnt == 0) {
num = 0;
nuggets0 = nuggets1;
if (return_depth) depths1 = at::empty({0, depth_dim}, octree.options().dtype(at::kFloat));
break;
}
// Subdivide if more levels remain, repeat
if (l < target_level) {
hipLaunchKernelGGL(( subdivide_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()), reinterpret_cast<uint2*>(nuggets1.data_ptr<int>()), ray_o_ptr, points_ptr,
octree_ptr, exclusive_sum_ptr, info_ptr, prefix_sum_ptr, l);
} else {
hipLaunchKernelGGL(( compactify_cuda_kernel<uint2>), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()), reinterpret_cast<uint2*>(nuggets1.data_ptr<int>()),
info_ptr, prefix_sum_ptr);
if (return_depth) {
depths1 = at::empty({cnt, depth_dim}, octree.options().dtype(at::kFloat));
if (with_exit) {
hipLaunchKernelGGL(( compactify_cuda_kernel<float2>), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, reinterpret_cast<float2*>(depths0.data_ptr<float>()),
reinterpret_cast<float2*>(depths1.data_ptr<float>()),
info_ptr, prefix_sum_ptr);
} else {
hipLaunchKernelGGL(( compactify_cuda_kernel<float>), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, depths0.data_ptr<float>(), depths1.data_ptr<float>(),
info_ptr, prefix_sum_ptr);
}
}
}
nuggets0 = nuggets1;
num = cnt;
}
if (return_depth) {
return { nuggets0.index({Slice(0, num)}).contiguous(),
depths1.index({Slice(0, num)}).contiguous() };
} else {
return { nuggets0.index({Slice(0, num)}).contiguous() };
}
}
void mark_pack_boundaries_cuda_impl(
at::Tensor pack_ids,
at::Tensor boundaries) {
int64_t num = pack_ids.size(0);
AT_DISPATCH_INTEGRAL_TYPES(pack_ids.type(), "mark_pack_boundaries_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(boundaries));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( mark_pack_boundaries_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, stream,
num,
pack_ids.data_ptr<scalar_t>(),
reinterpret_cast<uint*>(boundaries.data_ptr<int>()));
}));
}
void diff_cuda_impl(
int64_t num_packs,
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices){
int64_t* pack_indices_ptr = pack_indices.data_ptr<int64_t>();
const int num_threads = 256;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "diff_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( diff_cuda_kernel<scalar_t>), dim3((num_packs+num_threads-1)/num_threads), dim3(num_threads), 0, stream,
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr);
}));
}
void inclusive_sum_cuda_impl(
int64_t num,
at::Tensor info,
at::Tensor inclusive_sum){
int* info_ptr = info.data_ptr<int>();
int* inclusive_sum_ptr = inclusive_sum.data_ptr<int>();
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(
temp_storage_ptr, reinterpret_cast<uint*>(info_ptr), reinterpret_cast<uint*>(inclusive_sum_ptr), num);
at::Tensor temp_storage = at::zeros({(int64_t)temp_storage_bytes}, device(at::DeviceType::CUDA).dtype(at::kByte));
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
CubDebugExit(hipcub::DeviceScan::InclusiveSum(temp_storage_ptr, temp_storage_bytes, info_ptr, inclusive_sum_ptr, num));
}
int sum_reduce_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor inclusive_sum) {
int* inclusive_sum_ptr = inclusive_sum.data_ptr<int>();
int cnt;
const int num_threads = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "sum_reduce_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipMemcpyAsync(&cnt, inclusive_sum_ptr + num_feats - 1, sizeof(int), hipMemcpyDeviceToHost, stream);
hipLaunchKernelGGL(( sum_reduce_cuda_kernel<scalar_t>), dim3((num_feats+num_threads-1)/num_threads), dim3(num_threads), 0, stream,
num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
inclusive_sum_ptr);
}));
return cnt;
}
void cumsum_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices,
bool exclusive,
bool reverse) {
int64_t num_packs = pack_indices.size(0);
int* pack_indices_ptr = pack_indices.data_ptr<int>();
int offset = exclusive ? 1 : 0;
const int num_threads = 256;
if (reverse) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumsum_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cumsum_reverse_cuda_kernel<scalar_t>), dim3((num_packs+num_threads) / num_threads), dim3(num_threads), 0, stream,
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumsum_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cumsum_cuda_kernel<scalar_t>), dim3((num_packs+num_threads) / num_threads), dim3(num_threads), 0, stream,
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
}
}
void cumprod_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices,
bool exclusive,
bool reverse) {
int64_t num_packs = pack_indices.size(0);
int* pack_indices_ptr = pack_indices.data_ptr<int>();
int offset = exclusive ? 1 : 0;
const int num_threads = 256;
if (reverse) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumprod_reverse_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cumprod_reverse_cuda_kernel<scalar_t>), dim3((num_packs+num_threads) / num_threads), dim3(num_threads), 0, stream,
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumprod_cuda", ([&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(feats_out));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cumprod_cuda_kernel<scalar_t>), dim3((num_packs+num_threads) / num_threads), dim3(num_threads), 0, stream,
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
}
}
////////// generate rays //////////////////////////////////////////////////////////////////////////
__global__ void
generate_rays_cuda_kernel(
uint num,
uint width,
uint height,
float4x4 tf,
float3* ray_o,
float3* ray_d) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint px = tidx % width;
uint py = tidx / height;
float4 a = mul4x4(make_float4(0.0f, 0.0f, 1.0f, 0.0f), tf);
float4 b = mul4x4(make_float4(px, py, 0.0f, 1.0f), tf);
// float3 org = make_float3(M.m[3][0], M.m[3][1], M.m[3][2]);
ray_o[tidx] = make_float3(a.x, a.y, a.z);
ray_d[tidx] = make_float3(b.x, b.y, b.z);
}
}
void generate_primary_rays_cuda_impl(
uint width,
uint height,
float4x4& tf,
float3* ray_o,
float3* ray_d) {
uint num = width*height;
hipLaunchKernelGGL(( generate_rays_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0, num, width, height, tf, ray_o, ray_d);
}
////////// generate shadow rays /////////
__global__ void
plane_intersect_rays_cuda_kernel(
uint num,
float3* ray_o,
float3* ray_d,
float3* output,
float4 plane,
uint* info) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
float3 org = ray_o[tidx];
float3 dir = ray_d[tidx];
float a = org.x*plane.x + org.y*plane.y + org.z*plane.z + plane.w;
float b = dir.x*plane.x + dir.y*plane.y + dir.z*plane.z;
if (fabs(b) > 1e-3) {
float t = - a / b;
if (t > 0.0f) {
output[tidx] = make_float3(org.x + t*dir.x, org.y + t*dir.y, org.z + t*dir.z);
info[tidx] = 1;
} else {
info[tidx] = 0;
}
} else {
info[tidx] = 0;
}
}
}
__global__ void
compactify_shadow_rays_cuda_kernel(
uint num,
float3* p_in,
float3* p_out,
uint* map,
uint* info,
uint* prefix_sum) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
p_out[prefix_sum[tidx]] = p_in[tidx];
map[prefix_sum[tidx]] = tidx;
}
}
__global__ void
set_shadow_rays_cuda_kernel(
uint num,
float3* src,
float3* dst,
float3 light) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
dst[tidx] = normalize(src[tidx] - light);
src[tidx] = light;
}
}
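// Shadow-ray pipeline: intersect every input ray with the plane, compact the hit points
// with an exclusive scan (recording a map back to the originating rays), then turn each
// hit point into a ray that starts at the light and points toward it.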
uint generate_shadow_rays_cuda_impl(
uint num,
float3* ray_o,
float3* ray_d,
float3* src,
float3* dst,
uint* map,
float3& light,
float4& plane,
uint* info,
uint* prefix_sum) {
// set up memory for DeviceScan calls
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(temp_storage_ptr, info, prefix_sum, num);
at::Tensor temp_storage = at::zeros({(int64_t)temp_storage_bytes}, device(at::DeviceType::CUDA).dtype(at::kByte));
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
uint cnt = 0;
hipLaunchKernelGGL(( plane_intersect_rays_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, ray_o, ray_d, dst, plane, info);
CubDebugExit(hipcub::DeviceScan::ExclusiveSum(
temp_storage_ptr, temp_storage_bytes, info, prefix_sum, num));
hipMemcpy(&cnt, prefix_sum + num - 1, sizeof(uint), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( compactify_shadow_rays_cuda_kernel), dim3((num + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0,
num, dst, src, map, info, prefix_sum);
hipLaunchKernelGGL(( set_shadow_rays_cuda_kernel), dim3((cnt + RT_NUM_THREADS - 1) / RT_NUM_THREADS), dim3(RT_NUM_THREADS), 0, 0, cnt, src, dst, light);
return cnt;
}
} // namespace kaolin
| 0defcae0f31606ca7adf2df59deb10407a3e2200.cu | // Copyright (c) 2021,22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
#define CUB_NS_QUALIFIER ::kaolin::cub
#include <stdio.h>
#include <ATen/ATen.h>
#include <c10/cuda/CUDAGuard.h>
#ifdef EXPERIMENTAL
#include <ATen/native/cuda/KernelUtils.cuh>
#else
#include <THC/THCAtomics.cuh>
#endif
// TODO(ttakikawa): newer versions of PyTorch will migrate to <ATen/cuda/Atomics.cuh>.
// How do we manage these dependencies?
#define CUB_STDERR
#include <cub/device/device_scan.cuh>
#include "../../spc_math.h"
#include "../../spc_utils.cuh"
#include "spc_render_utils.cuh"
namespace kaolin {
using namespace at::indexing;
#define RT_NUM_THREADS 1024
////////////////////////////////////////////////////////////////////////////////////////////////
/// Constants
////////////////////////////////////////////////////////////////////////////////////////////////
__constant__ uint8_t VOXEL_ORDER[8][8] = {
{ 0, 1, 2, 4, 3, 5, 6, 7 },
{ 1, 0, 3, 5, 2, 4, 7, 6 },
{ 2, 0, 3, 6, 1, 4, 7, 5 },
{ 3, 1, 2, 7, 0, 5, 6, 4 },
{ 4, 0, 5, 6, 1, 2, 7, 3 },
{ 5, 1, 4, 7, 0, 3, 6, 2 },
{ 6, 2, 4, 7, 0, 3, 5, 1 },
{ 7, 3, 5, 6, 1, 2, 4, 0 }
};
////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernels
////////////////////////////////////////////////////////////////////////////////////////////////
// This function will initialize the nuggets array with each ray pointing to the octree root
__global__ void
init_nuggets_cuda_kernel(
const uint num,
uint2* nuggets) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
nuggets[tidx].x = tidx; // ray idx
nuggets[tidx].y = 0; // point idx
}
}
// This function will iterate over the nuggets (ray intersection proposals) and determine if they
// result in an intersection. If they do, the info tensor is populated with the # of child nodes
// as determined by the input octree.
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level,
const uint32_t not_done) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
float depth = ray_aabb(o, d, ray_inv, sgn, vc, r);
if (not_done){
if (depth != 0.0)
info[tidx] = __popc(octree[pidx]);
else
info[tidx] = 0;
}
else { // at bottom
if (depth > 0.0)
info[tidx] = 1;
else
info[tidx] = 0;
}
}
}
// Overloaded version of function above that returns depth of voxel/ ray entry points
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
float* depth,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
depth[tidx] = ray_aabb(o, d, ray_inv, sgn, vc, r);
// Perform AABB check
if (depth[tidx] > 0.0){
info[tidx] = 1; // mark to keep
} else {
info[tidx] = 0;
}
}
}
// Overloaded version of function above that returns depth of voxel/ ray entry and exit points
__global__ void
decide_cuda_kernel(
const uint num,
const point_data* __restrict__ points,
const float3* __restrict__ ray_o,
const float3* __restrict__ ray_d,
const uint2* __restrict__ nuggets,
float2* __restrict__ depth,
uint* __restrict__ info,
const uint8_t* __restrict__ octree,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint ridx = nuggets[tidx].x;
uint pidx = nuggets[tidx].y;
point_data p = points[pidx];
float3 o = ray_o[ridx];
float3 d = ray_d[ridx];
// Radius of voxel
float r = 1.0 / ((float)(0x1 << level));
// Transform to [-1, 1]
const float3 vc = make_float3(
fmaf(r, fmaf(2.0, p.x, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.y, 1.0), -1.0f),
fmaf(r, fmaf(2.0, p.z, 1.0), -1.0f));
// Compute aux info (precompute to optimize)
float3 sgn = ray_sgn(d);
float3 ray_inv = make_float3(1.0 / d.x, 1.0 / d.y, 1.0 / d.z);
depth[tidx] = ray_aabb_with_exit(o, d, ray_inv, vc, r);
// Perform AABB check
if (depth[tidx].x > 0.0 && depth[tidx].y > 0.0){
info[tidx] = 1; // mark to keep
} else {
info[tidx] = 0;
}
}
}
// This function will iterate over the nugget array and, for each nugget, store the child
// indices of that nugget (as defined by the octree tensor)
__global__ void
subdivide_cuda_kernel(
const uint num,
const uint2* __restrict__ nuggets_in,
uint2* __restrict__ nuggets_out,
const float3* __restrict__ ray_o,
const point_data* __restrict__ points,
const uint8_t* __restrict__ octree,
const uint* __restrict__ exclusive_sum,
const uint* __restrict__ info,
const uint* __restrict__ prefix_sum,
const uint32_t level) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
uint ridx = nuggets_in[tidx].x;
int pidx = nuggets_in[tidx].y;
point_data p = points[pidx];
uint base_idx = prefix_sum[tidx];
uint8_t o = octree[pidx];
uint s = exclusive_sum[pidx];
float scale = 1.0 / ((float)(0x1 << level));
float3 org = ray_o[ridx];
float x = (0.5f * org.x + 0.5f) - scale*((float)p.x + 0.5);
float y = (0.5f * org.y + 0.5f) - scale*((float)p.y + 0.5);
float z = (0.5f * org.z + 0.5f) - scale*((float)p.z + 0.5);
uint code = 0;
if (x > 0) code = 4;
if (y > 0) code += 2;
if (z > 0) code += 1;
for (uint i = 0; i < 8; i++) {
uint j = VOXEL_ORDER[code][i];
if (o&(0x1 << j)) {
uint cnt = __popc(o&((0x2 << j) - 1)); // count set bits up to child - inclusive sum
nuggets_out[base_idx].y = s + cnt;
nuggets_out[base_idx++].x = ridx;
}
}
}
}
template<typename scalar_t>
__global__ void
mark_pack_boundaries_cuda_kernel(
const int64_t num,
const scalar_t* __restrict__ pack_ids,
uint* __restrict__ boundaries) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
if (tidx == 0) {
boundaries[tidx] = 1;
} else {
boundaries[tidx] = pack_ids[tidx - 1] == pack_ids[tidx] ? 0 : 1;
}
}
}
// This function will take a buffer and remove the zero pads
template<typename scalar_t>
__global__ void
compactify_cuda_kernel(
const uint num,
const scalar_t* __restrict__ buffer_in,
scalar_t* __restrict__ buffer_out,
const uint* __restrict__ info,
const uint* __restrict__ prefix_sum) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
buffer_out[prefix_sum[tidx]] = buffer_in[tidx];
}
}
template<typename scalar_t>
__global__ void
diff_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int64_t* __restrict__ pack_indices) {
int64_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int64_t upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
for (int64_t i=pack_indices[tidx]; i<upper_bound-1; ++i) {
for (int64_t j=0; j<feat_dim; ++j) {
feats_out[i * feat_dim + j] = feats_in[(i+1) * feat_dim + j] - feats_in[i * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
sum_reduce_cuda_kernel(
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int32_t* __restrict__ inclusive_sum) {
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_feats) {
for (int i=0; i<feat_dim; ++i) {
int idx = (inclusive_sum[tidx]-1) * feat_dim + i;
# ifdef EXPERIMENTAL
int numel = num_feats*feat_dim;
at::native::fastAtomicAdd(feats_out, idx, numel, feats_in[tidx * feat_dim + i], true);
# else
gpuAtomicAdd(feats_out + idx, feats_in[tidx * feat_dim + i]);
# endif
}
}
}
// This kernel is the same as sum_reduce but avoids atomic add by packing the ops.
// It however will cause thread divergence.
template<typename scalar_t>
__global__ void
packed_sum_reduce_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
const int64_t* __restrict__ pack_indices) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int64_t upper_bound = (tidx == num_packs-1) ? num_feats*feat_dim : pack_indices[tidx+1];
for (int i=pack_indices[tidx]; i<upper_bound-1; ++i) {
for (int j=0; j<feat_dim; ++j) {
feats_out[i * feat_dim + j] += feats_in[i * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumprod_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[begin * feat_dim + j] = feats_in[begin * feat_dim + j];
}
}
for (int i=begin+1; i<upper_bound; ++i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i-offset) * feat_dim + j] * feats_out[(i-1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumprod_reverse_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[(upper_bound-1) * feat_dim + j] = feats_in[(upper_bound-1) * feat_dim + j];
}
}
for (int i=upper_bound-2; i>=begin; --i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i+offset) * feat_dim + j] * feats_out[(i+1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumsum_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[begin * feat_dim + j] = feats_in[begin * feat_dim + j];
}
}
for (int i=begin+1; i<upper_bound; ++i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i-offset) * feat_dim + j] + feats_out[(i-1) * feat_dim + j];
}
}
}
}
template<typename scalar_t>
__global__ void
cumsum_reverse_cuda_kernel(
const int64_t num_packs,
const int64_t num_feats,
const int64_t feat_dim,
const scalar_t* __restrict__ feats_in,
scalar_t* __restrict__ feats_out,
int32_t* __restrict__ pack_indices, // maps idx of pack -> beginning of global idx
int32_t offset) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num_packs) {
int upper_bound = (tidx == num_packs-1) ? num_feats : pack_indices[tidx+1];
int begin = pack_indices[tidx];
if (offset == 0) {
for (int j=0; j<feat_dim; ++j){
feats_out[(upper_bound-1) * feat_dim + j] = feats_in[(upper_bound-1) * feat_dim + j];
}
}
for (int i=upper_bound-2; i>=begin; --i) {
for (int j=0; j<feat_dim; ++j){
feats_out[i * feat_dim + j] = feats_in[(i+offset) * feat_dim + j] + feats_out[(i+1) * feat_dim + j];
}
}
}
}
std::vector<at::Tensor> raytrace_cuda_impl(
at::Tensor octree,
at::Tensor points,
at::Tensor pyramid,
at::Tensor exclusive_sum,
at::Tensor ray_o,
at::Tensor ray_d,
uint32_t max_level,
uint32_t target_level,
bool return_depth,
bool with_exit) {
uint num = ray_o.size(0);
uint8_t* octree_ptr = octree.data_ptr<uint8_t>();
point_data* points_ptr = reinterpret_cast<point_data*>(points.data_ptr<short>());
uint* exclusive_sum_ptr = reinterpret_cast<uint*>(exclusive_sum.data_ptr<int>());
float3* ray_o_ptr = reinterpret_cast<float3*>(ray_o.data_ptr<float>());
float3* ray_d_ptr = reinterpret_cast<float3*>(ray_d.data_ptr<float>());
// allocate local GPU storage
at::Tensor nuggets0 = at::empty({num, 2}, octree.options().dtype(at::kInt));
at::Tensor nuggets1;
uint depth_dim = with_exit ? 2 : 1;
at::Tensor depths0;
at::Tensor depths1;
// Generate proposals (first proposal is root node)
init_nuggets_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()));
uint cnt, buffer = 0;
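// Traverse level by level: decide_cuda_kernel flags which ray/node proposals (nuggets)
// still hit the octree, an inclusive scan over those flags yields output offsets, and the
// survivors are either subdivided into child proposals (l < target_level) or compactified
// into the final nugget / depth buffers at the target level.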
for (uint32_t l = 0; l <= target_level; l++) {
at::Tensor info = at::empty({num+1}, octree.options().dtype(at::kInt));
uint* info_ptr = reinterpret_cast<uint*>(info.data_ptr<int>());
// Do the proposals hit?
if (l == target_level && return_depth) {
depths0 = at::empty({num, depth_dim}, octree.options().dtype(at::kFloat));
if (with_exit) {
decide_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
reinterpret_cast<float2*>(l == target_level ? depths0.data_ptr<float>() : 0),
info_ptr, octree_ptr, l);
} else {
decide_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
l == target_level ? depths0.data_ptr<float>() : 0, info_ptr, octree_ptr, l);
}
}
else {
decide_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, points_ptr, ray_o_ptr, ray_d_ptr, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()),
info_ptr, octree_ptr, l, target_level - l);
}
at::Tensor prefix_sum = at::empty({num+1}, octree.options().dtype(at::kInt));
uint* prefix_sum_ptr = reinterpret_cast<uint*>(prefix_sum.data_ptr<int>());
// set first element to zero
CubDebugExit(cudaMemcpy(prefix_sum_ptr, &buffer, sizeof(uint), cudaMemcpyHostToDevice));
// set up memory for DeviceScan calls
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(
temp_storage_ptr, info_ptr, prefix_sum_ptr, num+1);
at::Tensor temp_storage = at::empty({(int64_t)temp_storage_bytes}, octree.options());
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
CubDebugExit(cub::DeviceScan::InclusiveSum(
temp_storage_ptr, temp_storage_bytes, info_ptr,
prefix_sum_ptr + 1, num)); //start sum on second element
cudaMemcpy(&cnt, prefix_sum_ptr + num, sizeof(uint), cudaMemcpyDeviceToHost);
// allocate local GPU storage
nuggets1 = at::empty({cnt, 2}, octree.options().dtype(at::kInt));
// miss everything
if (cnt == 0) {
num = 0;
nuggets0 = nuggets1;
if (return_depth) depths1 = at::empty({0, depth_dim}, octree.options().dtype(at::kFloat));
break;
}
// Subdivide if more levels remain, repeat
if (l < target_level) {
subdivide_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()), reinterpret_cast<uint2*>(nuggets1.data_ptr<int>()), ray_o_ptr, points_ptr,
octree_ptr, exclusive_sum_ptr, info_ptr, prefix_sum_ptr, l);
} else {
compactify_cuda_kernel<uint2><<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, reinterpret_cast<uint2*>(nuggets0.data_ptr<int>()), reinterpret_cast<uint2*>(nuggets1.data_ptr<int>()),
info_ptr, prefix_sum_ptr);
if (return_depth) {
depths1 = at::empty({cnt, depth_dim}, octree.options().dtype(at::kFloat));
if (with_exit) {
compactify_cuda_kernel<float2><<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, reinterpret_cast<float2*>(depths0.data_ptr<float>()),
reinterpret_cast<float2*>(depths1.data_ptr<float>()),
info_ptr, prefix_sum_ptr);
} else {
compactify_cuda_kernel<float><<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, depths0.data_ptr<float>(), depths1.data_ptr<float>(),
info_ptr, prefix_sum_ptr);
}
}
}
nuggets0 = nuggets1;
num = cnt;
}
if (return_depth) {
return { nuggets0.index({Slice(0, num)}).contiguous(),
depths1.index({Slice(0, num)}).contiguous() };
} else {
return { nuggets0.index({Slice(0, num)}).contiguous() };
}
}
void mark_pack_boundaries_cuda_impl(
at::Tensor pack_ids,
at::Tensor boundaries) {
int64_t num = pack_ids.size(0);
AT_DISPATCH_INTEGRAL_TYPES(pack_ids.type(), "mark_pack_boundaries_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(boundaries));
auto stream = at::cuda::getCurrentCUDAStream();
mark_pack_boundaries_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS, 0, stream>>>(
num,
pack_ids.data_ptr<scalar_t>(),
reinterpret_cast<uint*>(boundaries.data_ptr<int>()));
}));
}
void diff_cuda_impl(
int64_t num_packs,
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices){
int64_t* pack_indices_ptr = pack_indices.data_ptr<int64_t>();
const int num_threads = 256;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "diff_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
diff_cuda_kernel<scalar_t><<<(num_packs+num_threads-1)/num_threads, num_threads, 0, stream>>>(
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr);
}));
}
void inclusive_sum_cuda_impl(
int64_t num,
at::Tensor info,
at::Tensor inclusive_sum){
int* info_ptr = info.data_ptr<int>();
int* inclusive_sum_ptr = inclusive_sum.data_ptr<int>();
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(
temp_storage_ptr, reinterpret_cast<uint*>(info_ptr), reinterpret_cast<uint*>(inclusive_sum_ptr), num);
at::Tensor temp_storage = at::zeros({(int64_t)temp_storage_bytes}, device(at::DeviceType::CUDA).dtype(at::kByte));
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
CubDebugExit(cub::DeviceScan::InclusiveSum(temp_storage_ptr, temp_storage_bytes, info_ptr, inclusive_sum_ptr, num));
}
int sum_reduce_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor inclusive_sum) {
int* inclusive_sum_ptr = inclusive_sum.data_ptr<int>();
int cnt;
const int num_threads = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "sum_reduce_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
cudaMemcpyAsync(&cnt, inclusive_sum_ptr + num_feats - 1, sizeof(int), cudaMemcpyDeviceToHost, stream);
sum_reduce_cuda_kernel<scalar_t><<<(num_feats+num_threads-1)/num_threads, num_threads, 0, stream>>>(
num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
inclusive_sum_ptr);
}));
return cnt;
}
void cumsum_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices,
bool exclusive,
bool reverse) {
int64_t num_packs = pack_indices.size(0);
int* pack_indices_ptr = pack_indices.data_ptr<int>();
int offset = exclusive ? 1 : 0;
const int num_threads = 256;
if (reverse) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumsum_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
cumsum_reverse_cuda_kernel<scalar_t><<<(num_packs+num_threads) / num_threads, num_threads, 0, stream>>>(
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumsum_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
cumsum_cuda_kernel<scalar_t><<<(num_packs+num_threads) / num_threads, num_threads, 0, stream>>>(
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
}
}
void cumprod_cuda_impl(
int64_t num_feats,
int64_t feat_dim,
at::Tensor feats_in,
at::Tensor feats_out,
at::Tensor pack_indices,
bool exclusive,
bool reverse) {
int64_t num_packs = pack_indices.size(0);
int* pack_indices_ptr = pack_indices.data_ptr<int>();
int offset = exclusive ? 1 : 0;
const int num_threads = 256;
if (reverse) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumprod_reverse_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
cumprod_reverse_cuda_kernel<scalar_t><<<(num_packs+num_threads) / num_threads, num_threads, 0, stream>>>(
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(feats_in.type(), "cumprod_cuda", ([&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(feats_out));
auto stream = at::cuda::getCurrentCUDAStream();
cumprod_cuda_kernel<scalar_t><<<(num_packs+num_threads) / num_threads, num_threads, 0, stream>>>(
num_packs, num_feats, feat_dim,
feats_in.data_ptr<scalar_t>(),
feats_out.data_ptr<scalar_t>(),
pack_indices_ptr, offset);
}));
}
}
////////// generate rays //////////////////////////////////////////////////////////////////////////
__global__ void
generate_rays_cuda_kernel(
uint num,
uint width,
uint height,
float4x4 tf,
float3* ray_o,
float3* ray_d) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
uint px = tidx % width;
uint py = tidx / height;
float4 a = mul4x4(make_float4(0.0f, 0.0f, 1.0f, 0.0f), tf);
float4 b = mul4x4(make_float4(px, py, 0.0f, 1.0f), tf);
// float3 org = make_float3(M.m[3][0], M.m[3][1], M.m[3][2]);
ray_o[tidx] = make_float3(a.x, a.y, a.z);
ray_d[tidx] = make_float3(b.x, b.y, b.z);
}
}
void generate_primary_rays_cuda_impl(
uint width,
uint height,
float4x4& tf,
float3* ray_o,
float3* ray_d) {
uint num = width*height;
generate_rays_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(num, width, height, tf, ray_o, ray_d);
}
////////// generate shadow rays /////////
__global__ void
plane_intersect_rays_cuda_kernel(
uint num,
float3* ray_o,
float3* ray_d,
float3* output,
float4 plane,
uint* info) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
float3 org = ray_o[tidx];
float3 dir = ray_d[tidx];
float a = org.x*plane.x + org.y*plane.y + org.z*plane.z + plane.w;
float b = dir.x*plane.x + dir.y*plane.y + dir.z*plane.z;
if (fabs(b) > 1e-3) {
float t = - a / b;
if (t > 0.0f) {
output[tidx] = make_float3(org.x + t*dir.x, org.y + t*dir.y, org.z + t*dir.z);
info[tidx] = 1;
} else {
info[tidx] = 0;
}
} else {
info[tidx] = 0;
}
}
}
__global__ void
compactify_shadow_rays_cuda_kernel(
uint num,
float3* p_in,
float3* p_out,
uint* map,
uint* info,
uint* prefix_sum) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num && info[tidx]) {
p_out[prefix_sum[tidx]] = p_in[tidx];
map[prefix_sum[tidx]] = tidx;
}
}
__global__ void
set_shadow_rays_cuda_kernel(
uint num,
float3* src,
float3* dst,
float3 light) {
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < num) {
dst[tidx] = normalize(src[tidx] - light);
src[tidx] = light;
}
}
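// Shadow-ray pipeline: intersect every input ray with the plane, compact the hit points
// with an exclusive scan (recording a map back to the originating rays), then turn each
// hit point into a ray that starts at the light and points toward it.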
uint generate_shadow_rays_cuda_impl(
uint num,
float3* ray_o,
float3* ray_d,
float3* src,
float3* dst,
uint* map,
float3& light,
float4& plane,
uint* info,
uint* prefix_sum) {
// set up memory for DeviceScan calls
void* temp_storage_ptr = NULL;
uint64_t temp_storage_bytes = get_cub_storage_bytes(temp_storage_ptr, info, prefix_sum, num);
at::Tensor temp_storage = at::zeros({(int64_t)temp_storage_bytes}, device(at::DeviceType::CUDA).dtype(at::kByte));
temp_storage_ptr = (void*)temp_storage.data_ptr<uint8_t>();
uint cnt = 0;
plane_intersect_rays_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, ray_o, ray_d, dst, plane, info);
CubDebugExit(cub::DeviceScan::ExclusiveSum(
temp_storage_ptr, temp_storage_bytes, info, prefix_sum, num));
cudaMemcpy(&cnt, prefix_sum + num - 1, sizeof(uint), cudaMemcpyDeviceToHost);
compactify_shadow_rays_cuda_kernel<<<(num + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(
num, dst, src, map, info, prefix_sum);
set_shadow_rays_cuda_kernel<<<(cnt + RT_NUM_THREADS - 1) / RT_NUM_THREADS, RT_NUM_THREADS>>>(cnt, src, dst, light);
return cnt;
}
} // namespace kaolin
|
eeb7358c30472f89aed6b97f21071b4859a063d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
// Copyright (c) 2018 The SnowGem Project
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 10;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
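// With XINTREE, bid_s0_s1_x packs, from high to low bits:
//   bucketid | slotid0 (SLOTBITS) | slotid1 (SLOTBITS) | xhash (RESTBITS);
// without XINTREE the xhash field is dropped. Leaf nodes built with tree(idx) or
// tree(idx, xh) store a global hash index in the same word, recovered by getindex().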
struct tree {
u32 bid_s0_s1_x; // manual bitfields
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
return false;
}
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
#if WK == 9
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
#endif
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#elif WK==3
if (listindices3(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#elif WK==3
listindices3(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo+1];
#elif RESTBITS == 0
return 0;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
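// Tracks collisions on the rest bits within one bucket: xhashslots[] holds the most recent
// slot seen for each rest value and nextxhashslot[] chains the earlier ones, so
// addslot()/nextcollision()/slot() enumerate every prior slot sharing the same rest bits.
// The XBITMAP variant keeps a 64-bit slot mask per rest value instead.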
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
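// Round 0: each thread hashes its share of blake2b blocks, splits every 512-bit output into
// HASHESPERBLAKE hashes, and files each hash into the bucket named by its leading bits,
// storing the remaining hash bytes plus the originating hash index.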
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
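// Rounds 1..WK-1: digitO consumes slot0 buckets and emits slot1 buckets (odd rounds);
// digitE does the reverse (even rounds). Both enumerate pairs of slots in a bucket whose
// rest bits collide, XOR the remaining hash words, and file the pair into the bucket given
// by the next BUCKBITS bits of the XOR.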
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if DIGITBITS % 8 == 4 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif DIGITBITS % 8 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if DIGITBITS % 8 == 4 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif DIGITBITS % 8 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
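// With UNROLL defined, digit_1 and the per-round kernels below specialize digitO/digitE for
// the WN=200, WK=9 parameter set with XINTREE: the bucket id and xhash of each XOR are
// extracted with __byte_perm and the number of hash words handled per round is fixed.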
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS-1;
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi)));
}
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(hipFree(eq->nslots));
checkCudaErrors(hipFree(eq->sols));
checkCudaErrors(hipFree(eq->hta.trees0[0]));
checkCudaErrors(hipFree(eq->hta.trees1[0]));*/
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
free(sol_memory);
delete eq;
}
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(hipSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
} | eeb7358c30472f89aed6b97f21071b4859a063d1.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
// Copyright (c) 2018 The SnowGem Project
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include "eqcuda.hpp"
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS + 1 + 1;
static const u32 SLOTRANGE = 1 << SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE - 1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1 << RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS - 1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 10;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
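// Packing sketch (assuming the default (200,9) parameters with XINTREE and RESTBITS == 4,
// i.e. DIGITBITS == 20, BUCKBITS == 16, SLOTBITS == 6): bid_s0_s1_x holds
// bucketid(16) | slotid0(6) | slotid1(6) | xhash(4), filling the 32-bit word exactly;
// without XINTREE the xhash nibble is dropped and only the bucket and slot ids are kept.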
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r + 1) * DIGITBITS;
#else
const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
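// Worked example (assuming WN == 200, WK == 9, so DIGITBITS == 20, with XINTREE defined):
// round 0 keeps WN - DIGITBITS = 180 bits, so hashsize(0) == 23 bytes and hashwords(23) == 6;
// every later round drops another DIGITBITS bits, giving hashsize(1) == 20, hashsize(2) == 18, etc.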
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK + 1) / 2];
bucket1 *trees1[WK / 2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i = 0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size + i];
indices[size + i] = tmp;
}
}
return false;
}
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
#if WK == 9
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
#endif
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#elif WK==3
if (listindices3(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#elif WK==3
listindices3(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6);
binsizes[bsize]++;
}
for (u32 i = 0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo+1];
#elif RESTBITS == 0
return 0;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word;
}
};
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cannot use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
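// When XBITMAP is not defined, collisiondata keeps one singly linked list per xhash value:
// xhashslots[xh] holds the most recently added slot with rest bits xh and nextxhashslot[]
// chains back to earlier ones, so addslot()/nextcollision()/slot() visit every previous slot
// in the bucket that shares the current slot's rest bits.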
};
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0); // always 23 ?
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN / 8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if DIGITBITS % 8 == 4 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif DIGITBITS % 8 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if DIGITBITS % 8 == 4 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif DIGITBITS % 8 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4)
| (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6)
| (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i = htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS-1;
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
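// digitK scans the final-round buckets for full collisions: prob_disjoint() is only a cheap
// probabilistic filter against pairs that share an ancestor, while the exhaustive duplicate-index
// check is done inside candidate() by the listindices* chain (which returns true on a duplicate).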
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096);
solutions = (proof*)(((long long)sol_memory + 4095) & -4096);
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r = 0; r < WK; r++)
if ((r & 1) == 0)
eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2);
else
eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2);
checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi)));
}
eq_cuda_context::~eq_cuda_context()
{
/*checkCudaErrors(cudaFree(eq->nslots));
checkCudaErrors(cudaFree(eq->sols));
checkCudaErrors(cudaFree(eq->hta.trees0[0]));
checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
free(sol_memory);
delete eq;
}
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
checkCudaErrors(cudaSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
digitH << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit2 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit3 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit4 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit5 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit6 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit7 << <totalblocks, threadsperblock >> >(device_eq);
if (cancelf()) return;
digit8 << <totalblocks, threadsperblock >> >(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r)
: digitE << <totalblocks, threadsperblock >> >(device_eq, r);
}
#endif
if (cancelf()) return;
digitK << <totalblocks, threadsperblock >> >(device_eq);
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
hashdonef();
} |
2b6519c0e4248e90afc8830f9e425e21694a7a9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "rocblas.h"
#include <algorithm> // std::min std::max
using namespace std;
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
// Solving matrix equations, using double precision.
// A × X = αB, where α is a scalar, X and B ∈ R^{m×n}, and A ∈ R^{m×m} is a triangular matrix (lower triangular for this implementation).
// This is equivalent to solving n systems of equations of the form Ax_i = b_i, where b_i is a column of B and x_i is the solution we are looking for.
// Since the matrix is triangular, the linear system already comes "in echelon form".
// Exercise 2.1 a) 32 x n case
// To solve these systems:
// - Each thread block must overwrite one tile of B with the result of the operation.
// - Each warp of the block processes a column of 32 elements (it solves one of the n systems of equations). Since all of them use A, it has to be kept in fast memory.
// - Each thread of the warp computes one element of the column.
// - Each thread reads data computed by the threads of the previous index within the warp. To share data between the threads of a warp we have the following options:
// Exercise 2.1 a-1) Kernel for the 32 x n case with the threads of a warp communicating through shared memory
// Warp-level parallelism is implicit, because within a warp the code advances sequentially
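// For one column b of B, the kernel below performs plain forward substitution:
// x_0 = alpha*b_0 / A_00, and for i > 0: x_i = (alpha*b_i - sum_{k<i} A_ik * x_k) / A_ii.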
__global__ void dtrsm_32_shared_kernel(const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
__shared__ double shared_A[TILE_WIDTH][TILE_HEIGHT];
__shared__ double tile_B[TILE_WIDTH][TILE_HEIGHT];
double aux;
int x, y, row_b, memory_index_x, memory_index_y;
x = (blockIdx.x * blockDim.x) + threadIdx.x; // Column
y = (blockIdx.y * blockDim.y) + threadIdx.y; // Row
memory_index_x = threadIdx.x;
memory_index_y = threadIdx.y;
row_b = x*ldb;
// Each block stores its tile of A in shared memory
shared_A[memory_index_x][memory_index_y] = d_A[memory_index_x*lda + memory_index_y + stride_A];
aux = alpha*d_B[row_b + y + stride_B];
__syncthreads();
for(int k = 0; k <= TILE_WIDTH; ++k) {
if(k == memory_index_x) {
// We have reached the diagonal of A: the unknown is solved and its result is stored
tile_B[k][memory_index_y] = aux/shared_A[k][k];
}
__syncwarp();
if(k < memory_index_x) {
// The subtraction of products keeps accumulating while moving toward the diagonal of A.
aux -= shared_A[memory_index_x][k]*tile_B[k][memory_index_y];
}
}
d_B[row_b + y + stride_B] = tile_B[memory_index_x][memory_index_y];
}
// Exercise 2.1 a-2) Kernel for the 32 x n case with the threads of a warp communicating using the __shfl_sync primitive
__global__ void dtrsm_32_shuffle_kernel(const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
__shared__ double shared_A[TILE_WIDTH][TILE_HEIGHT];
double result, aux, aux2;
int x, y, row_b, memory_index_x, memory_index_y;
x = (blockIdx.x * blockDim.x) + threadIdx.x; // Row
y = (blockIdx.y * blockDim.y) + threadIdx.y; // Column
memory_index_x = threadIdx.x;
memory_index_y = threadIdx.y;
row_b = x*ldb;
// Each block stores its tile of A in shared memory
shared_A[memory_index_x][memory_index_y] = d_A[memory_index_x*lda + memory_index_y + stride_A];
aux = alpha*d_B[row_b + y + stride_B];
__syncthreads();
result = 0;
for(int k = 0; k <= TILE_WIDTH; ++k) {
if(k == memory_index_x) {
// We have reached the diagonal of A: the unknown is solved and its result is stored
result = aux/shared_A[k][k];
}
__syncwarp();
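// __shfl_sync(mask, var, srcLane) broadcasts var from lane srcLane to all lanes in mask
// (0xffffffff = the whole warp); here every thread picks up the freshly solved x_k held by
// the thread with threadIdx.x == k, without a round trip through shared memory.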
aux2 = __shfl_sync(0xffffffff, result, k);
if(k < memory_index_x) {
// The subtraction of products keeps accumulating while moving toward the diagonal of A.
aux -= shared_A[memory_index_x][k]*aux2;
}
}
d_B[row_b + y + stride_B] = result;
}
__global__ void dgemm_shared_kernel(int p, const double alpha, double *d_A, int lda, double *d_B, int ldb, double beta, double *d_C, int ldc, int stride_A, int stride_B, int stride_C);
// Exercise 2.2) Function for the 32k x n case with the threads of a warp communicating through the best variant of 2.1
// Here the triangular matrix is 32k x 32k, and we can split it into k x k tiles of 32 x 32 elements, with:
// - Diagonal tiles (triangular matrices)
// - Off-diagonal tiles (matrices with no triangular structure)
// To solve n systems of size 32k:
// - Each thread block processes 32 columns of B (walking the tiles A_{i,j} sequentially from left to right and top to bottom)
// If the tile is diagonal, the operation is identical to the previous case.
// If the tile is not diagonal, the operation to perform is the update of tile B_{i} through a DGEMM operation with 32x32 tiles
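// Tile schedule sketch for k == 2 (a 64 x n system), matching the loops below:
// B_0 <- solve(A_00, alpha*B_0) (diagonal tile, dtrsm_32_shared_kernel)
// B_1 <- B_1 - A_10 * B_0 (off-diagonal tile, dgemm_shared_kernel)
// B_1 <- solve(A_11, alpha*B_1) (diagonal tile)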
void dtrsm_32k(int block_amount_x, int block_amount_y, const double alpha, double *d_A, int lda, double *d_B, int ldb, int meta_stride_A, int meta_stride_B) {
// A is 32k x 32k, where k == block_amount_x
// B is 32k x n, where k == block_amount_x and n = 32*block_amount_y
int stride_A, stride_B, stride_C;
dim3 tamGrid(1, block_amount_y); // Grid dimension
dim3 tamGridDGEMM(block_amount_y, 1); // Grid dimension for DGEMM; it accesses by rows instead of columns, so it is mirrored
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
for(int i = 0; i < block_amount_x; ++i) {
stride_A = meta_stride_A + 32*i*lda; // Move the stride in A to the next block of rows.
stride_B = meta_stride_B; // Move the stride in B to the previous block of rows (Not used when i = 0).
stride_C = meta_stride_B + 32*i*ldb; // Move the stride in C to the next block of rows.
for(int j = 0; j <= i; ++j) {
if (i == j) { // Diagonal
hipLaunchKernelGGL(( dtrsm_32_shared_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, alpha, d_A, lda, d_B, ldb, stride_A, stride_C);
} else { // Off-diagonal
// Bi = Bi - Aij * Bj
// Bi = 32 x n (upper row block). Bj = 32 x n (current row block to update). A = 32 x 32. p == n
hipLaunchKernelGGL(( dgemm_shared_kernel), dim3(tamGridDGEMM), dim3(tamBlock), 0, 0, 32, -1.0, d_A, lda, d_B, ldb, 1.0, d_B, ldb, stride_A, stride_B, stride_C);
}
stride_A += 32; // Move the stride in A to the next column block
stride_B += 32*ldb;
}
}
}
// Exercise 3.3) Kernel that implements a recursive DTRSM solution using DGEMM and splitting the triangular matrix into 32x32 tiles.
// The base case is DTRSM 32 x n or DTRSM 32k x n (for a small k)
// The recursive step splits matrix A into 4 submatrices (and B in a consistent way).
void dtrsm_recursive(int m, int block_amount_y, const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
if(m == 64) { // Base case, A 32*2 x 32*2
dtrsm_32k(2, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A, stride_B);
} else { // Recursive step
// A and B are split as: |A11 0 | |B1|
// |A21 A22| |B2|
m /= 2;
dim3 tamGridDGEMM(block_amount_y, m/32); // Grid dimension for DGEMM; it accesses by rows instead of columns, so it is mirrored
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
// Process A11, keeping the memory addresses unchanged.
dtrsm_recursive(m, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A, stride_B);
// Process A21 (DGEMM), shifting the memory addresses to the row block below.
hipLaunchKernelGGL(( dgemm_shared_kernel), dim3(tamGridDGEMM), dim3(tamBlock), 0, 0, m, -1.0, d_A, lda, d_B, ldb, 1.0, d_B, ldb, stride_A + m*lda, stride_B, stride_B + m*ldb);
// Process A22, shifting the memory addresses to the row block below and A by m columns to the right.
dtrsm_recursive(m, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A + m*lda + m, stride_B + m*ldb);
}
}
// A and B are one-dimensional arrays of m × lda and n × ldb elements.
// For A, the lower triangle of the top-left m×m block must contain all of A (the upper triangle is not referenced)
// The operation is in-place (the results are returned in matrix B)
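// Hypothetical usage sketch (illustrative values only): solving a lower-triangular 64 x 64
// system against 128 right-hand sides stored row-major with lda == 64 and ldb == 128 could look like
// dtrsm_gpu(4, 64, 128, 1.0, A, 64, B, 128); // algorithm 4 = the 32k x n variant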
void dtrsm_gpu(int algorithm, int m, int n, const double alpha, double *A, int lda, double *B, int ldb) {
// Stage 1: Memory allocation
unsigned int size_a = m*lda*sizeof(double);
unsigned int size_b = m*ldb*sizeof(double);
// Allocation on the CPU
double * device_A = (double *)malloc(size_a);
double * device_B = (double *)malloc(size_b);
// Allocation on the GPU
CUDA_CHK(hipMalloc((void**)& device_A, size_a));
CUDA_CHK(hipMalloc((void**)& device_B, size_b));
// Stage 2: Data transfer (Host -> Device)
CUDA_CHK(hipMemcpy(device_A, A, size_a, hipMemcpyHostToDevice)); // destination pointer, source pointer, number of bytes to copy, transfer kind
CUDA_CHK(hipMemcpy(device_B, B, size_b, hipMemcpyHostToDevice));
// Stage 3: Define the grid
// A grid with the dimensions of B is created
int block_amount_x = m / TILE_WIDTH + (m % TILE_WIDTH != 0); // Division with ceiling
int block_amount_y = n / TILE_HEIGHT + (n % TILE_HEIGHT != 0); // Division with ceiling
dim3 tamGrid(block_amount_x, block_amount_y); // Grid dimension
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
// Stage 4: Launch the kernel
switch(algorithm) {
case 3: // 32 x n version
hipLaunchKernelGGL(( dtrsm_32_shared_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 4: // 32k x n version
dtrsm_32k(block_amount_x, block_amount_y, alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 5: // Recursive version.
dtrsm_recursive(m, block_amount_y, alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 7: // 32 x n Shuffle/Shared version (the least efficient one)
hipLaunchKernelGGL(( dtrsm_32_shuffle_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, alpha, device_A, lda, device_B, ldb, 0, 0);
}
hipDeviceSynchronize();
// Stage 5: Data transfer (Device -> Host)
CUDA_CHK(hipMemcpy(B, device_B, size_b, hipMemcpyDeviceToHost));
// Stage 6: Free memory
CUDA_CHK(hipFree(device_A));
CUDA_CHK(hipFree(device_B));
}
void dtrsm_cublas(int m, int n, const double *alpha, double *A, int lda, double *B, int ldb) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
hipblasHandle_t handle;
// Stage 1: Memory allocation
unsigned int size_a = m*lda*sizeof(double);
unsigned int size_b = ldb*n*sizeof(double);
// Allocation on the CPU
double * device_A = (double *)malloc(size_a);
double * device_B = (double *)malloc(size_b);
// Allocation on the GPU
CUDA_CHK(hipMalloc((void**)& device_A, size_a));
CUDA_CHK(hipMalloc((void**)& device_B, size_b));
// Stage 2: Create the cuBLAS handle
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
// Stage 3: Data transfer (Host -> Device)
status = hipblasSetMatrix(m, m, sizeof(double), A, lda, device_A, lda);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("data download A failed\n");
CUDA_CHK(hipFree(device_A));
hipblasDestroy(handle);
return;
}
status = hipblasSetMatrix (m, n, sizeof(double), B, ldb, device_B, ldb);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("data download B failed\n");
CUDA_CHK(hipFree(device_A));
CUDA_CHK(hipFree(device_B));
hipblasDestroy(handle);
return;
}
// Stage 4: Launch the kernel
status = hipblasDtrsm(
handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT,
m, n, alpha, device_A, lda, device_B, ldb
);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("DTRSM operation failed\n");
CUDA_CHK(hipFree(device_A));
CUDA_CHK(hipFree(device_B));
hipblasDestroy(handle);
return;
}
// Stage 5: Data transfer (Device -> Host)
status = hipblasGetMatrix (m, n, sizeof(double), device_B, ldb, B, ldb);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed\n");
hipblasDestroy(handle);
}
// Stage 6: Free memory
CUDA_CHK(hipFree(device_A));
CUDA_CHK(hipFree(device_B));
} | 2b6519c0e4248e90afc8830f9e425e21694a7a9e.cu | #include "util.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cublas_v2.h"
#include <algorithm> // std::min std::max
using namespace std;
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
// Solving matrix equations, using double precision.
// A × X = αB, where α is a scalar, X and B ∈ R^{m×n}, and A ∈ R^{m×m} is a triangular matrix (lower triangular for this implementation).
// This is equivalent to solving n systems of equations of the form Ax_i = b_i, where b_i is a column of B and x_i is the solution we are looking for.
// Since the matrix is triangular, the linear system already comes "in echelon form".
// Exercise 2.1 a) 32 x n case
// To solve these systems:
// - Each thread block must overwrite one tile of B with the result of the operation.
// - Each warp of the block processes a column of 32 elements (it solves one of the n systems of equations). Since all of them use A, it has to be kept in fast memory.
// - Each thread of the warp computes one element of the column.
// - Each thread reads data computed by the threads of the previous index within the warp. To share data between the threads of a warp we have the following options:
// Exercise 2.1 a-1) Kernel for the 32 x n case with the threads of a warp communicating through shared memory
// Warp-level parallelism is implicit, because within a warp the code advances sequentially
__global__ void dtrsm_32_shared_kernel(const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
__shared__ double shared_A[TILE_WIDTH][TILE_HEIGHT];
__shared__ double tile_B[TILE_WIDTH][TILE_HEIGHT];
double aux;
int x, y, row_b, memory_index_x, memory_index_y;
x = (blockIdx.x * blockDim.x) + threadIdx.x; // Column
y = (blockIdx.y * blockDim.y) + threadIdx.y; // Row
memory_index_x = threadIdx.x;
memory_index_y = threadIdx.y;
row_b = x*ldb;
// Each block stores its tile of A in shared memory
shared_A[memory_index_x][memory_index_y] = d_A[memory_index_x*lda + memory_index_y + stride_A];
aux = alpha*d_B[row_b + y + stride_B];
__syncthreads();
for(int k = 0; k <= TILE_WIDTH; ++k) {
if(k == memory_index_x) {
// We have reached the diagonal of A: the unknown is solved and its result is stored
tile_B[k][memory_index_y] = aux/shared_A[k][k];
}
__syncwarp();
if(k < memory_index_x) {
// The subtraction of products keeps accumulating while moving toward the diagonal of A.
aux -= shared_A[memory_index_x][k]*tile_B[k][memory_index_y];
}
}
d_B[row_b + y + stride_B] = tile_B[memory_index_x][memory_index_y];
}
// Exercise 2.1 a-2) Kernel for the 32 x n case with the threads of a warp communicating using the __shfl_sync primitive
__global__ void dtrsm_32_shuffle_kernel(const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
__shared__ double shared_A[TILE_WIDTH][TILE_HEIGHT];
double result, aux, aux2;
int x, y, row_b, memory_index_x, memory_index_y;
x = (blockIdx.x * blockDim.x) + threadIdx.x; // Row
y = (blockIdx.y * blockDim.y) + threadIdx.y; // Column
memory_index_x = threadIdx.x;
memory_index_y = threadIdx.y;
row_b = x*ldb;
// Each block stages its 32x32 tile of A in shared memory
shared_A[memory_index_x][memory_index_y] = d_A[memory_index_x*lda + memory_index_y + stride_A];
aux = alpha*d_B[row_b + y + stride_B];
__syncthreads();
result = 0;
for(int k = 0; k <= TILE_WIDTH; ++k) {
if(k == memory_index_x) {
// Reached the diagonal of A: this unknown is solved and its result is stored
result = aux/shared_A[k][k];
}
__syncwarp();
aux2 = __shfl_sync(0xffffffff, result, k);
if(k < memory_index_x) {
// Accumulate the running subtraction of products while advancing along the row of A towards the diagonal.
aux -= shared_A[memory_index_x][k]*aux2;
}
}
d_B[row_b + y + stride_B] = result;
}
__global__ void dgemm_shared_kernel(int p, const double alpha, double *d_A, int lda, double *d_B, int ldb, double beta, double *d_C, int ldc, int stride_A, int stride_B, int stride_C);
// Ex 2.2) Function for the 32k x n case, with the threads of a warp communicating through the best variant of 2.1
// Here the triangular matrix is 32k x 32k, and we can split it into k x k tiles of 32 x 32 elements, with:
// - Diagonal tiles (triangular matrices)
// - Off-diagonal tiles (matrices with no triangular structure)
// To solve n systems of size 32k:
// - Each thread block processes 32 columns of B (sweeping the tiles A_{i,j} sequentially from left to right and top to bottom)
// If the tile is diagonal, the operation is identical to the previous case.
// If the tile is off-diagonal, the operation to perform is the update of tile B_{i} through a DGEMM with 32x32 tiles
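// Added illustration (k = 2 case, lower triangular, scalar alpha omitted for clarity):
//
//   [ A11   0  ] [ X1 ]   [ B1 ]        X1  = A11^{-1} * B1        (diagonal tile     -> DTRSM 32 x n)
//   [ A21  A22 ] [ X2 ] = [ B2 ]  =>    B2 <- B2 - A21 * X1        (off-diagonal tile -> DGEMM update)
//                                       X2  = A22^{-1} * B2        (diagonal tile     -> DTRSM 32 x n)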
void dtrsm_32k(int block_amount_x, int block_amount_y, const double alpha, double *d_A, int lda, double *d_B, int ldb, int meta_stride_A, int meta_stride_B) {
// A is 32k x 32k, where k == block_amount_x
// B is 32k x n, where k == block_amount_x and n = 32*block_amount_y
int stride_A, stride_B, stride_C;
dim3 tamGrid(1, block_amount_y); // Grid dimension
dim3 tamGridDGEMM(block_amount_y, 1); // Grid dimension for DGEMM; it accesses by rows instead of columns, so it is mirrored
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
for(int i = 0; i < block_amount_x; ++i) {
stride_A = meta_stride_A + 32*i*lda; // Move the stride in A to the next block of rows.
stride_B = meta_stride_B; // Move the stride in B to the previous block of rows (Not used when i = 0).
stride_C = meta_stride_B + 32*i*ldb; // Move the stride in C to the next block of rows.
for(int j = 0; j <= i; ++j) {
if (i == j) { // Diagonal
dtrsm_32_shared_kernel<<<tamGrid, tamBlock>>>(alpha, d_A, lda, d_B, ldb, stride_A, stride_C);
} else { // Off-diagonal
// Bi = Bi - Aij * Bj
// Bi = 32 x n (current row block being updated). Bj = 32 x n (upper, already solved row block). A = 32 x 32. p == n
dgemm_shared_kernel<<<tamGridDGEMM, tamBlock>>>(32, -1.0, d_A, lda, d_B, ldb, 1.0, d_B, ldb, stride_A, stride_B, stride_C);
}
stride_A += 32; // Move the stride in A to the next column block
stride_B += 32*ldb;
}
}
}
// Ex 3.3) Function implementing a recursive DTRSM solution, using DGEMM and splitting the triangular matrix into 32x32 tiles.
// The base case is DTRSM 32 x n or DTRSM 32k x n (for a small k)
// The recursive step splits matrix A into 4 submatrices (and B consistently).
void dtrsm_recursive(int m, int block_amount_y, const double alpha, double *d_A, int lda, double *d_B, int ldb, int stride_A, int stride_B) {
if(m == 64) { // Base case, A is 32*2 x 32*2
dtrsm_32k(2, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A, stride_B);
} else { // Recursive step
// A and B are split as: |A11  0 | |B1|
//                       |A21 A22| |B2|
m /= 2;
dim3 tamGridDGEMM(block_amount_y, m/32); // Grid dimension for DGEMM; it accesses by rows instead of columns, so it is mirrored
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
// Process A11, keeping the current memory offsets.
dtrsm_recursive(m, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A, stride_B);
// Process A21 (DGEMM), shifting the memory offsets to the lower block of rows.
dgemm_shared_kernel<<<tamGridDGEMM, tamBlock>>>(m, -1.0, d_A, lda, d_B, ldb, 1.0, d_B, ldb, stride_A + m*lda, stride_B, stride_B + m*ldb);
// Process A22, shifting the memory offsets to the lower block of rows and A by m columns to the right.
dtrsm_recursive(m, block_amount_y, alpha, d_A, lda, d_B, ldb, stride_A + m*lda + m, stride_B + m*ldb);
}
}
// A and B are one-dimensional arrays of m × lda and n × ldb elements.
// For A, the lower triangle of the top-left m×m block must contain all of A (the upper triangle is not referenced)
// The operation is in-place (the results are returned in matrix B)
void dtrsm_gpu(int algorithm, int m, int n, const double alpha, double *A, int lda, double *B, int ldb) {
// Stage 1: Memory allocation
unsigned int size_a = m*lda*sizeof(double);
unsigned int size_b = m*ldb*sizeof(double);
// Device pointers
double * device_A = NULL;
double * device_B = NULL;
// Allocation on the GPU
CUDA_CHK(cudaMalloc((void**)& device_A, size_a));
CUDA_CHK(cudaMalloc((void**)& device_B, size_b));
// Stage 2: Data transfer (Host -> Device)
CUDA_CHK(cudaMemcpy(device_A, A, size_a, cudaMemcpyHostToDevice)); // destination pointer, source pointer, number of bytes to copy, transfer kind
CUDA_CHK(cudaMemcpy(device_B, B, size_b, cudaMemcpyHostToDevice));
// Stage 3: Define the grid
// A grid is created with the dimensions of B
int block_amount_x = m / TILE_WIDTH + (m % TILE_WIDTH != 0); // Division with ceiling
int block_amount_y = n / TILE_HEIGHT + (n % TILE_HEIGHT != 0); // Division with ceiling
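// note added for clarity: the two expressions above are the usual ceiling division, i.e. (x + tile - 1) / tile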
dim3 tamGrid(block_amount_x, block_amount_y); // Grid dimension
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
// Stage 4: Launch kernel
switch(algorithm) {
case 3: // 32 x n version
dtrsm_32_shared_kernel<<<tamGrid, tamBlock>>>(alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 4: // 32k x n version
dtrsm_32k(block_amount_x, block_amount_y, alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 5: // Recursive version.
dtrsm_recursive(m, block_amount_y, alpha, device_A, lda, device_B, ldb, 0, 0);
break;
case 7: // 32 x n Shuffle/Shared version (the least efficient)
dtrsm_32_shuffle_kernel<<<tamGrid, tamBlock>>>(alpha, device_A, lda, device_B, ldb, 0, 0);
}
cudaDeviceSynchronize();
// Stage 5: Data transfer (Device -> Host)
CUDA_CHK(cudaMemcpy(B, device_B, size_b, cudaMemcpyDeviceToHost));
// Stage 6: Free memory
CUDA_CHK(cudaFree(device_A));
CUDA_CHK(cudaFree(device_B));
}
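// --- Hypothetical usage sketch (added; not part of the original code) ---
// The sizes and leading dimensions below are illustrative and assume the padded row
// lengths expected by the kernels above (lda >= m for A, ldb >= n for B).
//
//   int m = 128, n = 256, lda = m, ldb = n;
//   double *A = (double *)malloc(m * lda * sizeof(double));   // lower triangular system matrix
//   double *B = (double *)malloc(m * ldb * sizeof(double));   // right-hand sides, overwritten with X
//   /* ... fill A and B ... */
//   dtrsm_gpu(4, m, n, 1.0, A, lda, B, ldb);                  // algorithm 4: 32k x n variant
//   /* on return, B holds X such that A * X = alpha * B */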
void dtrsm_cublas(int m, int n, const double *alpha, double *A, int lda, double *B, int ldb) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
cublasHandle_t handle;
// Stage 1: Memory allocation
unsigned int size_a = m*lda*sizeof(double);
unsigned int size_b = ldb*n*sizeof(double);
// Device pointers
double * device_A = NULL;
double * device_B = NULL;
// Allocation on the GPU
CUDA_CHK(cudaMalloc((void**)& device_A, size_a));
CUDA_CHK(cudaMalloc((void**)& device_B, size_b));
// Stage 2: Create the cuBLAS handle
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return;
}
// Stage 3: Data transfer (Host -> Device)
status = cublasSetMatrix(m, m, sizeof(double), A, lda, device_A, lda);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("data download A failed\n");
CUDA_CHK(cudaFree(device_A));
cublasDestroy(handle);
return;
}
status = cublasSetMatrix (m, n, sizeof(double), B, ldb, device_B, ldb);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("data download B failed\n");
CUDA_CHK(cudaFree(device_A));
CUDA_CHK(cudaFree(device_B));
cublasDestroy(handle);
return;
}
// Stage 4: Launch kernel
status = cublasDtrsm(
handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
m, n, alpha, device_A, lda, device_B, ldb
);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("DTRSM operation failed\n");
CUDA_CHK(cudaFree(device_A));
CUDA_CHK(cudaFree(device_B));
cublasDestroy(handle);
return;
}
// Stage 5: Data transfer (Device -> Host)
status = cublasGetMatrix (m, n, sizeof(double), device_B, ldb, B, ldb);
if (status != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed\n");
cublasDestroy(handle);
}
// Stage 6: Free memory
CUDA_CHK(cudaFree(device_A));
CUDA_CHK(cudaFree(device_B));
} |
0d5f97ced5f169d5b2e8cab04fb055360aeae710.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// KERNEL FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
__global__ void kernel(){
//======================================================================================================================================================
// COMMON VARIABLES
//======================================================================================================================================================
fp* d_in;
int rot_row;
int rot_col;
int in2_rowlow;
int in2_collow;
int ic;
int jc;
int jp1;
int ja1, ja2;
int ip1;
int ia1, ia2;
int ja, jb;
int ia, ib;
float s;
int i;
int j;
int row;
int col;
int ori_row;
int ori_col;
int position;
float sum;
int pos_ori;
float temp;
float temp2;
int location;
int cent;
int tMask_row;
int tMask_col;
float largest_value_current = 0;
float largest_value = 0;
int largest_coordinate_current = 0;
int largest_coordinate = 0;
float fin_max_val = 0;
int fin_max_coo = 0;
int largest_row;
int largest_col;
int offset_row;
int offset_col;
__shared__ float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__ float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__ float in_final_sum;
__shared__ float in_sqr_final_sum;
float mean;
float mean_sqr;
float variance;
float deviation;
__shared__ float denomT;
__shared__ float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE
__shared__ int par_max_coo[131]; // WATCH THIS !!! HARDCODED VALUE
int pointer;
__shared__ float d_in_mod_temp[2601];
int ori_pointer;
int loc_pointer;
//======================================================================================================================================================
// THREAD PARAMETERS
//======================================================================================================================================================
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int ei_new;
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// GENERATE TEMPLATE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// generate templates based on the first frame only
if(d_common_change.frame_no == 0){
//======================================================================================================================================================
// GET POINTER TO TEMPLATE FOR THE POINT
//======================================================================================================================================================
// pointers to: current template for current point
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
//======================================================================================================================================================
// UPDATE ROW LOC AND COL LOC
//======================================================================================================================================================
// update temporary endo/epi row/col coordinates (in each block corresponding to a point, narrow the work to one thread)
ei_new = tx;
if(ei_new == 0){
// update temporary row/col coordinates
pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no];
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no];
}
//======================================================================================================================================================
// CREATE TEMPLATES
//======================================================================================================================================================
// work
// ei_new = tx;
// while(ei_new < d_common.in_elem){
#pragma unroll 8
for(ei_new = tx; ei_new < d_common.in_elem; ei_new +=NUMBER_THREADS){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
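// note added for clarity: the index arithmetic above is equivalent to the plain
// column-major unflattening  row = ei_new % d_common.in_rows;  col = ei_new / d_common.in_rows;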
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col*d_common.frame_rows+ori_row;
// update template
d_in[col*d_common.in_rows+row] = d_common_change.d_frame[ori_pointer];
// go for second round
// ei_new = ei_new + NUMBER_THREADS;
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// PROCESS POINTS
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// process points in all frames except for the first one
if(d_common_change.frame_no != 0){
//======================================================================================================================================================
// SELECTION
//======================================================================================================================================================
in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1)
in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize;
// work
ei_new = tx;
while(ei_new < d_common.in2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_rows == 0){
row = d_common.in2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + in2_rowlow - 1;
ori_col = col + in2_collow - 1;
d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col*d_common.frame_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
//====================================================================================================
// ROTATION
//====================================================================================================
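// note added for clarity: the template is flipped 180 degrees here, so the "convolution"
// computed below is effectively a cross-correlation of the original template with the search window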
// variables
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// work
ei_new = tx;
while(ei_new < d_common.in_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
// execution
rot_row = (d_common.in_rows-1) - row;
rot_col = (d_common.in_rows-1) - col;
d_in_mod_temp[ei_new] = d_in[rot_col*d_common.in_rows+rot_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// ACTUAL CONVOLUTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.conv_elem){
// figure out row/col location in array
ic = (ei_new+1) % d_common.conv_rows; // (1-n)
jc = (ei_new+1) / d_common.conv_rows + 1; // (1-n)
if((ei_new+1) % d_common.conv_rows == 0){
ic = d_common.conv_rows;
jc = jc-1;
}
//
j = jc + d_common.joffset;
jp1 = j + 1;
if(d_common.in2_cols < jp1){
ja1 = jp1 - d_common.in2_cols;
}
else{
ja1 = 1;
}
if(d_common.in_cols < j){
ja2 = d_common.in_cols;
}
else{
ja2 = j;
}
i = ic + d_common.ioffset;
ip1 = i + 1;
if(d_common.in2_rows < ip1){
ia1 = ip1 - d_common.in2_rows;
}
else{
ia1 = 1;
}
if(d_common.in_rows < i){
ia2 = d_common.in_rows;
}
else{
ia2 = i;
}
s = 0;
for(ja=ja1; ja<=ja2; ja++){
jb = jp1 - ja;
for(ia=ia1; ia<=ia2; ia++){
ib = ip1 - ia;
s = s + d_in_mod_temp[d_common.in_rows*(ja-1)+ia-1] * d_unique[bx].d_in2[d_common.in2_rows*(jb-1)+ib-1];
}
}
//d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_conv[ei_new] = s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
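// note added for clarity: this section effectively builds a summed-area table. The vertical
// cumulative sum plus the shifted selection/subtraction yields per-column window sums, and the
// horizontal pass then turns those into the sum of in2 over every local (template-sized) window.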
//====================================================================================================
// PAD ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// PAD ARRAY
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){
row = d_common.in2_pad_cumv_rows - 1;
col = col-1;
}
// execution
if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array
row < (d_common.in2_pad_add_rows+d_common.in2_rows) &&
col > (d_common.in2_pad_add_cols-1) &&
col < (d_common.in2_pad_add_cols+d_common.in2_cols)){
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col*d_common.in2_rows+ori_row];
}
else{ // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// VERTICAL CUMULATIVE SUM
//==================================================
//work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_cols){
// figure out column position
pos_ori = ei_new*d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){
row = d_common.in2_sub_cumh_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// HORIZONTAL CUMULATIVE SUM
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_rows){
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub2_rows == 0){
row = d_common.in2_sub2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// subtract
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sqr_elem){
temp = d_unique[bx].d_in2[ei_new];
d_unique[bx].d_in2_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// PAD ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// PAD ARRAY
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){
row = d_common.in2_pad_cumv_rows - 1;
col = col-1;
}
// execution
if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array
row < (d_common.in2_pad_add_rows+d_common.in2_sqr_rows) &&
col > (d_common.in2_pad_add_cols-1) &&
col < (d_common.in2_pad_add_cols+d_common.in2_sqr_cols)){
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col*d_common.in2_sqr_rows+ori_row];
}
else{ // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// VERTICAL CUMULATIVE SUM
//==================================================
//work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_cols){
// figure out column position
pos_ori = ei_new*d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){
row = d_common.in2_sub_cumh_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// HORIZONTAL CUMULATIVE SUM
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_rows){
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub2_rows == 0){
row = d_common.in2_sub2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// subtract
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
//====================================================================================================
// DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
temp = d_unique[bx].d_in2_sub2[ei_new];
temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem);
if(temp2 < 0){
temp2 = 0;
}
d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2);
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in_sqr_elem){
temp = d_in[ei_new];
d_unique[bx].d_in_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// IN SUM
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in_cols){
sum = 0;
for(i = 0; i < d_common.in_rows; i++){
sum = sum + d_in[ei_new*d_common.in_rows+i];
}
in_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// IN_SQR SUM
//====================================================================================================
ei_new = tx;
while(ei_new < d_common.in_sqr_rows){
sum = 0;
for(i = 0; i < d_common.in_sqr_cols; i++){
sum = sum + d_unique[bx].d_in_sqr[ei_new+d_common.in_sqr_rows*i];
}
in_sqr_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// FINAL SUMMATION
//====================================================================================================
if(tx == 0){
in_final_sum = 0;
for(i = 0; i<d_common.in_cols; i++){
in_final_sum = in_final_sum + in_partial_sum[i];
}
}else if(tx == 1){
in_sqr_final_sum = 0;
for(i = 0; i<d_common.in_sqr_cols; i++){
in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i];
}
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// DENOMINATOR T
//====================================================================================================
if(tx == 0){
mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI
mean_sqr = mean * mean;
variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI
deviation = sqrt(variance); // gets standard deviation of ROI
denomT = sqrt(float(d_common.in_elem-1))*deviation;
}
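// note added for clarity: denomT combines the template's standard deviation with the template
// size and is used below as the template-side factor of the correlation denominator.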
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// NUMERATOR SAVE RESULT IN CONVOLUTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.conv_elem){
d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new] - d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// CORRELATION SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new] / d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
cent = d_common.sSize + d_common.tSize + 1;
if(d_common_change.frame_no == 0){
tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
}
else{
pointer = d_common_change.frame_no-1+d_unique[bx].point_no*d_common.no_frames;
tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
}
//work
ei_new = tx;
while(ei_new < d_common.tMask_elem){
location = tMask_col*d_common.tMask_rows + tMask_row;
if(ei_new==location){
d_unique[bx].d_tMask[ei_new] = 1;
}
else{
d_unique[bx].d_tMask[ei_new] = 0;
}
//go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.mask_conv_elem){
// figure out row/col location in array
ic = (ei_new+1) % d_common.mask_conv_rows; // (1-n)
jc = (ei_new+1) / d_common.mask_conv_rows + 1; // (1-n)
if((ei_new+1) % d_common.mask_conv_rows == 0){
ic = d_common.mask_conv_rows;
jc = jc-1;
}
//
j = jc + d_common.mask_conv_joffset;
jp1 = j + 1;
if(d_common.mask_cols < jp1){
ja1 = jp1 - d_common.mask_cols;
}
else{
ja1 = 1;
}
if(d_common.tMask_cols < j){
ja2 = d_common.tMask_cols;
}
else{
ja2 = j;
}
i = ic + d_common.mask_conv_ioffset;
ip1 = i + 1;
if(d_common.mask_rows < ip1){
ia1 = ip1 - d_common.mask_rows;
}
else{
ia1 = 1;
}
if(d_common.tMask_rows < i){
ia2 = d_common.tMask_rows;
}
else{
ia2 = i;
}
s = 0;
for(ja=ja1; ja<=ja2; ja++){
jb = jp1 - ja;
for(ia=ia1; ia<=ia2; ia++){
ib = ip1 - ia;
s = s + d_unique[bx].d_tMask[d_common.tMask_rows*(ja-1)+ia-1] * 1;
}
}
// //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// MAXIMUM VALUE
//======================================================================================================================================================
//====================================================================================================
// INITIAL SEARCH
//====================================================================================================
ei_new = tx;
while(ei_new < d_common.mask_conv_rows){
for(i=0; i<d_common.mask_conv_cols; i++){
largest_coordinate_current = ei_new*d_common.mask_conv_rows+i;
largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]);
if(largest_value_current > largest_value){
largest_coordinate = largest_coordinate_current;
largest_value = largest_value_current;
}
}
par_max_coo[ei_new] = largest_coordinate;
par_max_val[ei_new] = largest_value;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// FINAL SEARCH
//====================================================================================================
if(tx == 0){
for(i = 0; i < d_common.mask_conv_rows; i++){
if(par_max_val[i] > fin_max_val){
fin_max_val = par_max_val[i];
fin_max_coo = par_max_coo[i];
}
}
// convert coordinate to row/col form
largest_row = (fin_max_coo+1) % d_common.mask_conv_rows - 1; // (0-n) row
largest_col = (fin_max_coo+1) / d_common.mask_conv_rows; // (0-n) column
if((fin_max_coo+1) % d_common.mask_conv_rows == 0){
largest_row = d_common.mask_conv_rows - 1;
largest_col = largest_col - 1;
}
// calculate offset
largest_row = largest_row + 1; // compensate to match MATLAB format (1-n)
largest_col = largest_col + 1; // compensate to match MATLAB format (1-n)
offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize);
offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize);
pointer = d_common_change.frame_no+d_unique[bx].point_no*d_common.no_frames;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row;
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// COORDINATE AND TEMPLATE UPDATE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// time19 = clock();
// if this is the last frame in the batch, update the template
if(d_common_change.frame_no != 0 && (d_common_change.frame_no)%10 == 0){
// update coordinate
loc_pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no;
d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer];
d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer];
// work
ei_new = tx;
while(ei_new < d_common.in_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col*d_common.frame_rows+ori_row;
// update template
d_in[ei_new] = d_common.alpha*d_in[ei_new] + (1.00-d_common.alpha)*d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// END OF FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
| 0d5f97ced5f169d5b2e8cab04fb055360aeae710.cu | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// KERNEL FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
__global__ void kernel(){
//======================================================================================================================================================
// COMMON VARIABLES
//======================================================================================================================================================
fp* d_in;
int rot_row;
int rot_col;
int in2_rowlow;
int in2_collow;
int ic;
int jc;
int jp1;
int ja1, ja2;
int ip1;
int ia1, ia2;
int ja, jb;
int ia, ib;
float s;
int i;
int j;
int row;
int col;
int ori_row;
int ori_col;
int position;
float sum;
int pos_ori;
float temp;
float temp2;
int location;
int cent;
int tMask_row;
int tMask_col;
float largest_value_current = 0;
float largest_value = 0;
int largest_coordinate_current = 0;
int largest_coordinate = 0;
float fin_max_val = 0;
int fin_max_coo = 0;
int largest_row;
int largest_col;
int offset_row;
int offset_col;
__shared__ float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__ float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE
__shared__ float in_final_sum;
__shared__ float in_sqr_final_sum;
float mean;
float mean_sqr;
float variance;
float deviation;
__shared__ float denomT;
__shared__ float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE
__shared__ int par_max_coo[131]; // WATCH THIS !!! HARDCODED VALUE
int pointer;
__shared__ float d_in_mod_temp[2601];
int ori_pointer;
int loc_pointer;
//======================================================================================================================================================
// THREAD PARAMETERS
//======================================================================================================================================================
int bx = blockIdx.x; // get current horizontal block index (0-n)
int tx = threadIdx.x; // get current horizontal thread index (0-n)
int ei_new;
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// GENERATE TEMPLATE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// generate templates based on the first frame only
if(d_common_change.frame_no == 0){
//======================================================================================================================================================
// GET POINTER TO TEMPLATE FOR THE POINT
//======================================================================================================================================================
// pointers to: current template for current point
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
//======================================================================================================================================================
// UPDATE ROW LOC AND COL LOC
//======================================================================================================================================================
// update temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread)
ei_new = tx;
if(ei_new == 0){
// update temporary row/col coordinates
pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no];
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no];
}
//======================================================================================================================================================
// CREATE TEMPLATES
//======================================================================================================================================================
// work
// ei_new = tx;
// while(ei_new < d_common.in_elem){
#pragma unroll 8
for(ei_new = tx; ei_new < d_common.in_elem; ei_new +=NUMBER_THREADS){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col*d_common.frame_rows+ori_row;
// update template
d_in[col*d_common.in_rows+row] = d_common_change.d_frame[ori_pointer];
// go for second round
// ei_new = ei_new + NUMBER_THREADS;
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// PROCESS POINTS
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// process points in all frames except for the first one
if(d_common_change.frame_no != 0){
//======================================================================================================================================================
// SELECTION
//======================================================================================================================================================
in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1)
in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize;
// work
ei_new = tx;
while(ei_new < d_common.in2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_rows == 0){
row = d_common.in2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + in2_rowlow - 1;
ori_col = col + in2_collow - 1;
d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col*d_common.frame_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
//====================================================================================================
// ROTATION
//====================================================================================================
// variables
d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer];
// work
ei_new = tx;
while(ei_new < d_common.in_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
// execution
rot_row = (d_common.in_rows-1) - row;
rot_col = (d_common.in_rows-1) - col;
d_in_mod_temp[ei_new] = d_in[rot_col*d_common.in_rows+rot_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// ACTUAL CONVOLUTION
//====================================================================================================
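// editorial note: the loop below computes a full 2-D convolution of the rotated
// template (d_in_mod_temp) with the search window (d_in2); the ja1/ja2 and
// ia1/ia2 limits clip the overlap of the two arrays at the borders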
// work
ei_new = tx;
while(ei_new < d_common.conv_elem){
// figure out row/col location in array
ic = (ei_new+1) % d_common.conv_rows; // (1-n)
jc = (ei_new+1) / d_common.conv_rows + 1; // (1-n)
if((ei_new+1) % d_common.conv_rows == 0){
ic = d_common.conv_rows;
jc = jc-1;
}
//
j = jc + d_common.joffset;
jp1 = j + 1;
if(d_common.in2_cols < jp1){
ja1 = jp1 - d_common.in2_cols;
}
else{
ja1 = 1;
}
if(d_common.in_cols < j){
ja2 = d_common.in_cols;
}
else{
ja2 = j;
}
i = ic + d_common.ioffset;
ip1 = i + 1;
if(d_common.in2_rows < ip1){
ia1 = ip1 - d_common.in2_rows;
}
else{
ia1 = 1;
}
if(d_common.in_rows < i){
ia2 = d_common.in_rows;
}
else{
ia2 = i;
}
s = 0;
for(ja=ja1; ja<=ja2; ja++){
jb = jp1 - ja;
for(ia=ia1; ia<=ia2; ia++){
ib = ip1 - ia;
s = s + d_in_mod_temp[d_common.in_rows*(ja-1)+ia-1] * d_unique[bx].d_in2[d_common.in2_rows*(jb-1)+ib-1];
}
}
//d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_conv[ei_new] = s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
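// editorial note: the pad -> vertical cumulative sum -> select -> subtract ->
// horizontal cumulative sum -> select -> subtract sequence below is, in effect,
// a summed-area table: it yields the sum of in2 over every template-sized window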
//====================================================================================================
// PAD ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// PADD ARRAY
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){
row = d_common.in2_pad_cumv_rows - 1;
col = col-1;
}
// execution
if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array
row < (d_common.in2_pad_add_rows+d_common.in2_rows) &&
col > (d_common.in2_pad_add_cols-1) &&
col < (d_common.in2_pad_add_cols+d_common.in2_cols)){
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col*d_common.in2_rows+ori_row];
}
else{ // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// VERTICAL CUMULATIVE SUM
//==================================================
//work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_cols){
// figure out column position
pos_ori = ei_new*d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){
row = d_common.in2_sub_cumh_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// HORIZONTAL CUMULATIVE SUM
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_rows){
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub2_rows == 0){
row = d_common.in2_sub2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// subtract
d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sqr_elem){
temp = d_unique[bx].d_in2[ei_new];
d_unique[bx].d_in2_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// PAD ARRAY, VERTICAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// PAD ARRAY
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_elem){
// figure out row/col location in padded array
row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){
row = d_common.in2_pad_cumv_rows - 1;
col = col-1;
}
// execution
if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array
row < (d_common.in2_pad_add_rows+d_common.in2_sqr_rows) &&
col > (d_common.in2_pad_add_cols-1) &&
col < (d_common.in2_pad_add_cols+d_common.in2_sqr_cols)){
ori_row = row - d_common.in2_pad_add_rows;
ori_col = col - d_common.in2_pad_add_cols;
d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col*d_common.in2_sqr_rows+ori_row];
}
else{ // do if otherwise
d_unique[bx].d_in2_pad_cumv[ei_new] = 0;
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// VERTICAL CUMULATIVE SUM
//==================================================
//work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_cols){
// figure out column position
pos_ori = ei_new*d_common.in2_pad_cumv_rows;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){
d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum;
sum = d_unique[bx].d_in2_pad_cumv[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_pad_cumv_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){
row = d_common.in2_pad_cumv_sel_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel_collow - 1;
d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){
row = d_common.in2_sub_cumh_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1;
ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1;
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_elem){
// subtract
d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// HORIZONTAL CUMULATIVE SUM
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_rows){
// figure out row position
pos_ori = ei_new;
// variables
sum = 0;
// loop through all rows
for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){
d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum;
sum = d_unique[bx].d_in2_sub_cumh[position];
}
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub_cumh_sel_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){
row = d_common.in2_sub_cumh_sel_rows - 1;
col = col - 1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel_collow - 1;
d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// SELECTION 2, SUBTRACTION
//====================================================================================================
//==================================================
// SELECTION 2
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in2_sub2_rows == 0){
row = d_common.in2_sub2_rows - 1;
col = col-1;
}
// figure out corresponding location in old matrix and copy values to new matrix
ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1;
ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1;
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//==================================================
// SYNCHRONIZE THREADS
//==================================================
__syncthreads();
//==================================================
// SUBTRACTION
//==================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
// subtract
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// FINAL
//======================================================================================================================================================
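// editorial note: the steps below assemble a normalized cross-correlation,
//   numerator   = conv - local_sum(in2) * sum(T) / n
//   denominator = sqrt( local_sum(in2^2) - local_sum(in2)^2 / n ) * sqrt(n-1) * std(T)
// where n = d_common.in_elem is the number of template pixels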
//====================================================================================================
// DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
temp = d_unique[bx].d_in2_sub2[ei_new];
temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem);
if(temp2 < 0){
temp2 = 0;
}
d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2);
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// MULTIPLICATION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in_sqr_elem){
temp = d_in[ei_new];
d_unique[bx].d_in_sqr[ei_new] = temp * temp;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// IN SUM
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in_cols){
sum = 0;
for(i = 0; i < d_common.in_rows; i++){
sum = sum + d_in[ei_new*d_common.in_rows+i];
}
in_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// IN_SQR SUM
//====================================================================================================
ei_new = tx;
while(ei_new < d_common.in_sqr_rows){
sum = 0;
for(i = 0; i < d_common.in_sqr_cols; i++){
sum = sum + d_unique[bx].d_in_sqr[ei_new+d_common.in_sqr_rows*i];
}
in_sqr_partial_sum[ei_new] = sum;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// FINAL SUMMATION
//====================================================================================================
if(tx == 0){
in_final_sum = 0;
for(i = 0; i<d_common.in_cols; i++){
in_final_sum = in_final_sum + in_partial_sum[i];
}
}else if(tx == 1){
in_sqr_final_sum = 0;
for(i = 0; i<d_common.in_sqr_cols; i++){
in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i];
}
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// DENOMINATOR T
//====================================================================================================
if(tx == 0){
mean = in_final_sum / d_common.in_elem; // gets mean (average) value of elements in ROI
mean_sqr = mean * mean;
variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI
deviation = sqrt(variance); // gets standard deviation of ROI
denomT = sqrt(float(d_common.in_elem-1))*deviation;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// NUMERATOR SAVE RESULT IN CONVOLUTION
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.conv_elem){
d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new] - d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// CORRELATION SAVE RESULT IN CUMULATIVE SUM A2
//====================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.in2_sub2_elem){
d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new] / d_unique[bx].d_in2_sqr_sub2[ei_new];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
cent = d_common.sSize + d_common.tSize + 1;
if(d_common_change.frame_no == 0){
tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
}
else{
pointer = d_common_change.frame_no-1+d_unique[bx].point_no*d_common.no_frames;
tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1;
tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1;
}
//work
ei_new = tx;
while(ei_new < d_common.tMask_elem){
location = tMask_col*d_common.tMask_rows + tMask_row;
if(ei_new==location){
d_unique[bx].d_tMask[ei_new] = 1;
}
else{
d_unique[bx].d_tMask[ei_new] = 0;
}
//go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// work
ei_new = tx;
while(ei_new < d_common.mask_conv_elem){
// figure out row/col location in array
ic = (ei_new+1) % d_common.mask_conv_rows; // (1-n)
jc = (ei_new+1) / d_common.mask_conv_rows + 1; // (1-n)
if((ei_new+1) % d_common.mask_conv_rows == 0){
ic = d_common.mask_conv_rows;
jc = jc-1;
}
//
j = jc + d_common.mask_conv_joffset;
jp1 = j + 1;
if(d_common.mask_cols < jp1){
ja1 = jp1 - d_common.mask_cols;
}
else{
ja1 = 1;
}
if(d_common.tMask_cols < j){
ja2 = d_common.tMask_cols;
}
else{
ja2 = j;
}
i = ic + d_common.mask_conv_ioffset;
ip1 = i + 1;
if(d_common.mask_rows < ip1){
ia1 = ip1 - d_common.mask_rows;
}
else{
ia1 = 1;
}
if(d_common.tMask_rows < i){
ia2 = d_common.tMask_rows;
}
else{
ia2 = i;
}
s = 0;
for(ja=ja1; ja<=ja2; ja++){
jb = jp1 - ja;
for(ia=ia1; ia<=ia2; ia++){
ib = ip1 - ia;
s = s + d_unique[bx].d_tMask[d_common.tMask_rows*(ja-1)+ia-1] * 1;
}
}
// //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s;
d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
//======================================================================================================================================================
// MAXIMUM VALUE
//======================================================================================================================================================
//====================================================================================================
// INITIAL SEARCH
//====================================================================================================
ei_new = tx;
while(ei_new < d_common.mask_conv_rows){
for(i=0; i<d_common.mask_conv_cols; i++){
largest_coordinate_current = ei_new*d_common.mask_conv_rows+i;
largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]);
if(largest_value_current > largest_value){
largest_coordinate = largest_coordinate_current;
largest_value = largest_value_current;
}
}
par_max_coo[ei_new] = largest_coordinate;
par_max_val[ei_new] = largest_value;
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
//====================================================================================================
// SYNCHRONIZE THREADS
//====================================================================================================
__syncthreads();
//====================================================================================================
// FINAL SEARCH
//====================================================================================================
if(tx == 0){
for(i = 0; i < d_common.mask_conv_rows; i++){
if(par_max_val[i] > fin_max_val){
fin_max_val = par_max_val[i];
fin_max_coo = par_max_coo[i];
}
}
// convert coordinate to row/col form
largest_row = (fin_max_coo+1) % d_common.mask_conv_rows - 1; // (0-n) row
largest_col = (fin_max_coo+1) / d_common.mask_conv_rows; // (0-n) column
if((fin_max_coo+1) % d_common.mask_conv_rows == 0){
largest_row = d_common.mask_conv_rows - 1;
largest_col = largest_col - 1;
}
// calculate offset
largest_row = largest_row + 1; // compensate to match MATLAB format (1-n)
largest_col = largest_col + 1; // compensate to match MATLAB format (1-n)
offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize);
offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize);
pointer = d_common_change.frame_no+d_unique[bx].point_no*d_common.no_frames;
d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row;
d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col;
}
//======================================================================================================================================================
// SYNCHRONIZE THREADS
//======================================================================================================================================================
__syncthreads();
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// COORDINATE AND TEMPLATE UPDATE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// time19 = clock();
// if this is the last frame in the batch (every 10 frames), update the template
if(d_common_change.frame_no != 0 && (d_common_change.frame_no)%10 == 0){
// update coordinate
loc_pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no;
d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer];
d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer];
// work
ei_new = tx;
while(ei_new < d_common.in_elem){
// figure out row/col location in new matrix
row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row
col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column
if((ei_new+1) % d_common.in_rows == 0){
row = d_common.in_rows - 1;
col = col-1;
}
// figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right)
ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1;
ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1;
ori_pointer = ori_col*d_common.frame_rows+ori_row;
// update template: exponential moving average, weight alpha on the old template and (1-alpha) on the new frame
d_in[ei_new] = d_common.alpha*d_in[ei_new] + (1.00-d_common.alpha)*d_common_change.d_frame[ori_pointer];
// go for second round
ei_new = ei_new + NUMBER_THREADS;
}
}
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// END OF FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
|
ec14b69f6ca5836ad63387562d2a12c25a918a9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy_sym_in.cu normal z -> c, Tue Feb 9 16:05:29 2016
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
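/*
editorial worked example (numbers are illustrative, not from the original file):
for m = 1000, n = 100 the wrapper at the bottom of this file launches
threads = ( BLK_X, 1 ) = ( 64, 1 )
grid = ( ceil(m/BLK_X), ceil(n/BLK_Y) ) = ( 16, 4 )
so each thread owns one row of a 64-row block and loops over up to
BLK_Y = 32 columns of its block-column.
*/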
static __device__
void clacpy_sym_in_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_in_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
//#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj )
{
int j = rows[2*(iby+jj)];
if (perm[ind] <= j)
dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] );
else
dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj )
{
int j = rows[2*(iby+jj)];
if (perm[ind] <= j)
dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] );
else
dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_in_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_in_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_in_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_in_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb);
}
/**
Purpose
-------
CLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of rows that are swapped. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the identity permutation array.
On exit, it is updated with the new pivots given by rows such that
i-th row will be the original perm[i]-th row after the pivots are applied.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX array, dimension (LDDB,N)
On exit, dB stores the columns after the pivots are applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_sym_in_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( clacpy_sym_in_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( clacpy_sym_in_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
else {
hipLaunchKernelGGL(( clacpy_sym_in_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
}
| ec14b69f6ca5836ad63387562d2a12c25a918a9b.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy_sym_in.cu normal z -> c, Tue Feb 9 16:05:29 2016
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_sym_in_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_in_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
//#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj )
{
int j = rows[2*(iby+jj)];
if (perm[ind] <= j)
dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] );
else
dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj )
{
int j = rows[2*(iby+jj)];
if (perm[ind] <= j)
dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] );
else
dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_sym_in_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_sym_in_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_in_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void clacpy_sym_in_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb);
}
/**
Purpose
-------
CLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as CLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of rows that are swapped. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the identity permutation array.
On exit, it is updated with the new pivots given by rows such that
i-th row will be the original perm[i]-th row after the pivots are applied.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX array, dimension (LDDB,N)
On exit, dB stores the columns after the pivots are applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_sym_in_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
clacpy_sym_in_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
clacpy_sym_in_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
else {
clacpy_sym_in_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
}
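/******************************************************************************
A minimal usage sketch (not part of the original MAGMA sources). It only
assumes that dA, dB, d_rows and d_perm already live on the device and that
`queue` is a valid magma_queue_t; the wrapper name and argument names are
illustrative.
*******************************************************************************/
static void
example_clacpy_sym_in_lower(
magma_int_t m, magma_int_t n,
magma_int_t *d_rows, magma_int_t *d_perm,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
// copy the lower (symmetric) part of dA into dB, applying the row swaps
// described by d_rows/d_perm, asynchronously on `queue`
magmablas_clacpy_sym_in_q( MagmaLower, m, n, d_rows, d_perm,
dA, ldda, dB, lddb, queue );
}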
|
f85692ce853fda583cb982e49565cfcf9c6419db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void average_pooling2d(const size_t threads, const float *input_data,
float *output_data, const size_t N,
const size_t C, const size_t H,
const size_t W, const size_t kernel_H,
const size_t kernel_W, const size_t p_H,
const size_t p_W, const size_t padding,
const size_t stride) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= threads)
return;
size_t idx = id;
size_t idx_W = idx % p_W;
idx /= p_W;
size_t idx_H = idx % p_H;
idx /= p_H;
size_t idx_C = idx % C;
size_t idx_N = idx / C;
int hs = (int)idx_H * stride - padding;
int ws = (int)idx_W * stride - padding;
size_t hend = min(hs + kernel_H, H);
size_t wend = min(ws + kernel_W, W);
hs = max(hs, 0);
ws = max(ws, 0);
float temp = 0;
for (index_t i = hs; i < hend; i++) {
for (index_t j = ws; j < wend; j++) {
temp += input_data[idx_N * C * H * W + idx_C * H * W + i * W + j];
}
}
output_data[id] = temp / (kernel_H * kernel_W);
}
int DLGpuAvgerage_Pooling2d(const DLArrayHandle input, const size_t kernel_H,
const size_t kernel_W, DLArrayHandle output,
const size_t padding, const size_t stride,
DLStreamHandle stream_handle = NULL) {
size_t input_N = input->shape[0];
size_t input_C = input->shape[1];
size_t input_H = input->shape[2];
size_t input_W = input->shape[3];
size_t output_H = output->shape[2];
size_t output_W = output->shape[3];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
size_t pooled_H = (input_H + 2 * padding - kernel_H) / stride + 1;
size_t pooled_W = (input_W + 2 * padding - kernel_W) / stride + 1;
size_t output_size = input_N * input_C * output_H * output_W;
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( average_pooling2d), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0,
*(hipStream_t *)stream_handle->handle,
output_size, input_data, output_data, input_N, input_C, input_H,
input_W, kernel_H, kernel_W, pooled_H, pooled_W, padding, stride);
else
hipLaunchKernelGGL(( average_pooling2d), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, input_data, output_data, input_N, input_C, input_H,
input_W, kernel_H, kernel_W, pooled_H, pooled_W, padding, stride);
return 0;
}
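// Illustrative helper (not used by the code above): the pooled output extent
// computed inline above follows the usual convolution-style formula
// out = (in + 2*padding - kernel) / stride + 1;
// e.g. in = 32, kernel = 2, padding = 0, stride = 2 gives out = 16.
static inline size_t pooled_extent(size_t in, size_t kernel, size_t padding,
size_t stride) {
return (in + 2 * padding - kernel) / stride + 1;
}
// average_pooling2d_gradient (below): one thread per element of gradient_X;
// it sums the upstream gradients of every output window that covers this
// element and scales the sum by 1 / (kernel_H * kernel_W).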
__global__ void average_pooling2d_gradient(
const size_t threads, const float *input_data, float *output_data,
const size_t N, const size_t C, const size_t H, const size_t W,
const size_t kernel_H, const size_t kernel_W, const size_t p_H,
const size_t p_W, const size_t padding, const size_t stride) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= threads)
return;
size_t idx = id;
size_t idx_W = idx % p_W;
idx /= p_W;
size_t idx_H = idx % p_H;
idx /= p_H;
size_t idx_C = idx % C;
size_t idx_N = idx / C;
size_t hs = (idx_H < kernel_H) ? 0 : (idx_H - kernel_H) / stride + 1;
size_t hend = min(idx_H / stride + 1, H);
size_t ws = (idx_W < kernel_W) ? 0 : (idx_W - kernel_W) / stride + 1;
size_t wend = min(idx_W / stride + 1, W);
float temp = 0;
const size_t pooling_size = kernel_H * kernel_W;
for (index_t i = hs; i < hend; i++) {
for (index_t j = ws; j < wend; j++) {
temp += input_data[idx_N * C * H * W + idx_C * H * W + i * W + j];
}
}
output_data[id] = temp / pooling_size;
}
int DLGpuAvgerage_Pooling2d_gradient(const DLArrayHandle gradient_Y,
const size_t kernel_H,
const size_t kernel_W,
DLArrayHandle gradient_X,
const size_t padding, const size_t stride,
DLStreamHandle stream_handle = NULL) {
size_t N = gradient_Y->shape[0];
size_t C = gradient_Y->shape[1];
size_t H = gradient_Y->shape[2];
size_t W = gradient_Y->shape[3];
size_t pooled_H = gradient_X->shape[2];
size_t pooled_W = gradient_X->shape[3];
const float *input_data = (const float *)gradient_Y->data;
float *output_data = (float *)gradient_X->data;
size_t output_size = N * C * pooled_H * pooled_W;
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
hipLaunchKernelGGL(( average_pooling2d_gradient), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0,
*(hipStream_t *)stream_handle->handle,
output_size, input_data, output_data, N, C, H, W, kernel_H,
kernel_W, pooled_H, pooled_W, padding, stride);
else
hipLaunchKernelGGL(( average_pooling2d_gradient), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, input_data, output_data, N, C, H, W, kernel_H,
kernel_W, pooled_H, pooled_W, padding, stride);
return 0;
}
| f85692ce853fda583cb982e49565cfcf9c6419db.cu | #include "gpu_runtime.h"
__global__ void average_pooling2d(const size_t threads, const float *input_data,
float *output_data, const size_t N,
const size_t C, const size_t H,
const size_t W, const size_t kernel_H,
const size_t kernel_W, const size_t p_H,
const size_t p_W, const size_t padding,
const size_t stride) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= threads)
return;
size_t idx = id;
size_t idx_W = idx % p_W;
idx /= p_W;
size_t idx_H = idx % p_H;
idx /= p_H;
size_t idx_C = idx % C;
size_t idx_N = idx / C;
int hs = (int)idx_H * stride - padding;
int ws = (int)idx_W * stride - padding;
size_t hend = min(hs + kernel_H, H);
size_t wend = min(ws + kernel_W, W);
hs = max(hs, 0);
ws = max(ws, 0);
float temp = 0;
for (index_t i = hs; i < hend; i++) {
for (index_t j = ws; j < wend; j++) {
temp += input_data[idx_N * C * H * W + idx_C * H * W + i * W + j];
}
}
output_data[id] = temp / (kernel_H * kernel_W);
}
int DLGpuAvgerage_Pooling2d(const DLArrayHandle input, const size_t kernel_H,
const size_t kernel_W, DLArrayHandle output,
const size_t padding, const size_t stride,
DLStreamHandle stream_handle = NULL) {
size_t input_N = input->shape[0];
size_t input_C = input->shape[1];
size_t input_H = input->shape[2];
size_t input_W = input->shape[3];
size_t output_H = output->shape[2];
size_t output_W = output->shape[3];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
size_t pooled_H = (input_H + 2 * padding - kernel_H) / stride + 1;
size_t pooled_W = (input_W + 2 * padding - kernel_W) / stride + 1;
size_t output_size = input_N * input_C * output_H * output_W;
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
average_pooling2d<<<BLOCKS, THREADS_PER_BLOCK, 0,
*(cudaStream_t *)stream_handle->handle>>>(
output_size, input_data, output_data, input_N, input_C, input_H,
input_W, kernel_H, kernel_W, pooled_H, pooled_W, padding, stride);
else
average_pooling2d<<<BLOCKS, THREADS_PER_BLOCK>>>(
output_size, input_data, output_data, input_N, input_C, input_H,
input_W, kernel_H, kernel_W, pooled_H, pooled_W, padding, stride);
return 0;
}
__global__ void average_pooling2d_gradient(
const size_t threads, const float *input_data, float *output_data,
const size_t N, const size_t C, const size_t H, const size_t W,
const size_t kernel_H, const size_t kernel_W, const size_t p_H,
const size_t p_W, const size_t padding, const size_t stride) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= threads)
return;
size_t idx = id;
size_t idx_W = idx % p_W;
idx /= p_W;
size_t idx_H = idx % p_H;
idx /= p_H;
size_t idx_C = idx % C;
size_t idx_N = idx / C;
size_t hs = (idx_H < kernel_H) ? 0 : (idx_H - kernel_H) / stride + 1;
size_t hend = min(idx_H / stride + 1, H);
size_t ws = (idx_W < kernel_W) ? 0 : (idx_W - kernel_W) / stride + 1;
size_t wend = min(idx_W / stride + 1, W);
float temp = 0;
const size_t pooling_size = kernel_H * kernel_W;
for (index_t i = hs; i < hend; i++) {
for (index_t j = ws; j < wend; j++) {
temp += input_data[idx_N * C * H * W + idx_C * H * W + i * W + j];
}
}
output_data[id] = temp / pooling_size;
}
int DLGpuAvgerage_Pooling2d_gradient(const DLArrayHandle gradient_Y,
const size_t kernel_H,
const size_t kernel_W,
DLArrayHandle gradient_X,
const size_t padding, const size_t stride,
DLStreamHandle stream_handle = NULL) {
size_t N = gradient_Y->shape[0];
size_t C = gradient_Y->shape[1];
size_t H = gradient_Y->shape[2];
size_t W = gradient_Y->shape[3];
size_t pooled_H = gradient_X->shape[2];
size_t pooled_W = gradient_X->shape[3];
const float *input_data = (const float *)gradient_Y->data;
float *output_data = (float *)gradient_X->data;
size_t output_size = N * C * pooled_H * pooled_W;
size_t BLOCKS = (output_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (stream_handle)
average_pooling2d_gradient<<<BLOCKS, THREADS_PER_BLOCK, 0,
*(cudaStream_t *)stream_handle->handle>>>(
output_size, input_data, output_data, N, C, H, W, kernel_H,
kernel_W, pooled_H, pooled_W, padding, stride);
else
average_pooling2d_gradient<<<BLOCKS, THREADS_PER_BLOCK>>>(
output_size, input_data, output_data, N, C, H, W, kernel_H,
kernel_W, pooled_H, pooled_W, padding, stride);
return 0;
}
|
8d31e7056bece047a5289db3fe097af6f2767f86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/library.h>
// Copied and adapted from
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
// Below is experimental temporary code before merging it to PyTorch
namespace at {
namespace native {
namespace internal_upsample {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L20-L29
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bilinear_filter(accscalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<accscalar_t>(1.0) - x;
}
return static_cast<accscalar_t>(0.0);
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L46-L62
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bicubic_filter(accscalar_t x) {
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
#define a -0.5
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return ((a + 2.0) * x - (a + 3.0)) * x * x + static_cast<accscalar_t>(1.0);
}
if (x < 2.0) {
return (((x - 5) * x + 8) * x - 4) * a;
}
return static_cast<accscalar_t>(0.0);
#undef a
}
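// Worked example for _compute_weights below: a 2x bilinear downscale has
// scale = 2, support = 2, invscale = 0.5; output index i = 0 then gives
// center = 1.0, xmin = 0, xmax = 3, raw weights 0.75, 0.75, 0.25, and
// normalised weights of roughly 0.43, 0.43 and 0.14.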
template <typename scalar_t, typename accscalar_t, typename filter_fn_t>
__device__ __forceinline__ static void _compute_weights(
const int i,
const int input_size,
const accscalar_t scale,
const accscalar_t support,
scalar_t* wt_ptr,
int interp_size,
filter_fn_t filter_fn,
int& xmin,
int& xmax) {
accscalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
accscalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int>(center - support + 0.5), static_cast<int>(0));
xmax = min(static_cast<int>(center + support + 0.5), input_size) - xmin;
accscalar_t total_w = 0.0;
int j = 0;
for (j = 0; j < xmax; j++) {
accscalar_t w = filter_fn((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
template <typename scalar_t, typename accscalar_t>
__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim(
scalar_t* src,
scalar_t* weights,
int64_t size) {
scalar_t t = static_cast<accscalar_t>(*src);
scalar_t wts = static_cast<accscalar_t>(weights[0]);
accscalar_t output = t * wts;
int64_t j = 1;
for (; j < size; j++) {
wts = static_cast<accscalar_t>(weights[j]);
t = static_cast<accscalar_t>(*(src + j));
output += t * wts;
}
return output;
}
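// upsample_gen2d_out_frame (below): each thread owns one output pixel
// (h2, w2). It computes the horizontal and vertical anti-aliasing weights
// once into the fixed-size local buffers, then loops over batch and channels
// reusing those weights for every (n, c) slice.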
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(rheight >= 1.0) ? (interp_size * 0.5) * rheight : interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(rwidth >= 1.0) ? (interp_size * 0.5) * rwidth : interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
scalar_t buffer1[256];
scalar_t buffer2[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
if (interp_size == 2) {
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
bilinear_filter,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
bilinear_filter,
ymin,
ysize);
} else if (interp_size == 4) {
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
bicubic_filter,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
bicubic_filter,
ymin,
ysize);
}
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
// interpolate on x-axis for ymin to ymin + ysize
for (int y = 0; y < ysize; y++) {
// copy data into the local buffer and use
// interpolate_aa_single_dim method
for (int x = 0; x < xsize; x++) {
buffer1[x] = idata[n][c][ymin + y][xmin + x];
}
buffer2[y] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer1, wx, xsize));
}
odata[n][c][h2][w2] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer2, wy, ysize));
}
}
}
}
template <int interp_size>
static void upsample_gen2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_gen2d_out_cuda", {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
hipLaunchKernelGGL(( upsample_gen2d_out_frame<scalar_t, accscalar_t, interp_size>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace internal_upsample
} // namespace native
} // namespace at
namespace vision {
namespace ops {
namespace {
// Copied from "UpSample.h" as we can not use UpSample.h with UpSample.cuh
static std::array<int64_t, 4> upsample_2d_common_check(
at::IntArrayRef input_size,
at::IntArrayRef output_size) {
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t nbatch = input_size[0];
int64_t channels = input_size[1];
int64_t input_height = input_size[2];
int64_t input_width = input_size[3];
TORCH_CHECK(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0,
"Input and output sizes should be greater than 0,"
" but got input (H: ",
input_height,
", W: ",
input_width,
") output (H: ",
output_height,
", W: ",
output_width,
")");
return {nbatch, channels, output_height, output_width};
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBilinear2d.cpp
auto output = at::empty({0}, input.options());
auto osize = at::native::upsample::compute_output_size(
input.sizes(), output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input.sizes(), osize);
// Allow for empty batch size but not other dimensions
TORCH_CHECK(
input.numel() != 0 ||
c10::multiply_integers(
input.sizes().begin() + 1, input.sizes().end()),
"Non-empty 4D data tensor expected but got a tensor with sizes ",
input.sizes());
output.resize_(full_output_size, input.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_out_cuda_template<interp_size>(
output,
input,
{full_output_size[2], full_output_size[3]},
align_corners,
scale_h,
scale_w);
return output;
}
at::Tensor interpolate_bilinear2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<2>(
input, output_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<4>(
input, output_size, align_corners);
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa"),
TORCH_FN(interpolate_bilinear2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa"),
TORCH_FN(interpolate_bicubic2d_aa_forward_kernel));
}
} // namespace ops
} // namespace vision
| 8d31e7056bece047a5289db3fe097af6f2767f86.cu | #include <torch/library.h>
// Copied and adapted from
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
// Below is experimental temporary code before merging it to PyTorch
namespace at {
namespace native {
namespace internal_upsample {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L20-L29
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bilinear_filter(accscalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<accscalar_t>(1.0) - x;
}
return static_cast<accscalar_t>(0.0);
}
// taken from
// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/
// src/libImaging/Resample.c#L46-L62
template <typename accscalar_t>
__device__ __forceinline__ static accscalar_t bicubic_filter(accscalar_t x) {
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
#define a -0.5
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return ((a + 2.0) * x - (a + 3.0)) * x * x + static_cast<accscalar_t>(1.0);
}
if (x < 2.0) {
return (((x - 5) * x + 8) * x - 4) * a;
}
return static_cast<accscalar_t>(0.0);
#undef a
}
template <typename scalar_t, typename accscalar_t, typename filter_fn_t>
__device__ __forceinline__ static void _compute_weights(
const int i,
const int input_size,
const accscalar_t scale,
const accscalar_t support,
scalar_t* wt_ptr,
int interp_size,
filter_fn_t filter_fn,
int& xmin,
int& xmax) {
accscalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
accscalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int>(center - support + 0.5), static_cast<int>(0));
xmax = min(static_cast<int>(center + support + 0.5), input_size) - xmin;
accscalar_t total_w = 0.0;
int j = 0;
for (j = 0; j < xmax; j++) {
accscalar_t w = filter_fn((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
template <typename scalar_t, typename accscalar_t>
__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim(
scalar_t* src,
scalar_t* weights,
int64_t size) {
scalar_t t = static_cast<accscalar_t>(*src);
scalar_t wts = static_cast<accscalar_t>(weights[0]);
accscalar_t output = t * wts;
int64_t j = 1;
for (; j < size; j++) {
wts = static_cast<accscalar_t>(weights[j]);
t = static_cast<accscalar_t>(*(src + j));
output += t * wts;
}
return output;
}
template <typename scalar_t, typename accscalar_t, int interp_size>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_gen2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
const accscalar_t support_h = static_cast<accscalar_t>(
(rheight >= 1.0) ? (interp_size * 0.5) * rheight : interp_size * 0.5);
const accscalar_t support_w = static_cast<accscalar_t>(
(rwidth >= 1.0) ? (interp_size * 0.5) * rwidth : interp_size * 0.5);
const int interp_height = (int)ceilf(support_h) * 2 + 1;
const int interp_width = (int)ceilf(support_w) * 2 + 1;
// Setup local buffers
// TODO: maybe we can specify dynamic shared memory size before calling the
// cuda code, however we should then ensure that device has enough shared
// memory
scalar_t wx[256];
scalar_t wy[256];
scalar_t buffer1[256];
scalar_t buffer2[256];
// Compute weights
int xmin, xsize, ymin, ysize;
typedef scalar_t (*filter_fn_t)(scalar_t);
if (interp_size == 2) {
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
bilinear_filter,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
bilinear_filter,
ymin,
ysize);
} else if (interp_size == 4) {
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
w2,
width1,
rwidth,
support_w,
wx,
interp_width,
bicubic_filter,
xmin,
xsize);
_compute_weights<scalar_t, accscalar_t, filter_fn_t>(
h2,
height1,
rheight,
support_h,
wy,
interp_height,
bicubic_filter,
ymin,
ysize);
}
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
// interpolate on x-axis for ymin to ymin + ysize
for (int y = 0; y < ysize; y++) {
// copy data into the local buffer and use
// interpolate_aa_single_dim method
for (int x = 0; x < xsize; x++) {
buffer1[x] = idata[n][c][ymin + y][xmin + x];
}
buffer2[y] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer1, wx, xsize));
}
odata[n][c][h2][w2] = static_cast<scalar_t>(
interpolate_aa_single_dim<scalar_t, accscalar_t>(
buffer2, wy, ysize));
}
}
}
}
template <int interp_size>
static void upsample_gen2d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_gen2d_out_cuda", {input_arg, output_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
// We are using static buffer memory of 256 * sizeof(float) per thread
// to store weights. Size of weights array is
// interp_size = scale * 2 + 1 for bilinear mode
TORCH_CHECK(
rheight < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
TORCH_CHECK(
rwidth < (255 / interp_size),
"Max supported scale factor is 127 (bilinear), 63 (bicubic)");
upsample_gen2d_out_frame<scalar_t, accscalar_t, interp_size>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace internal_upsample
} // namespace native
} // namespace at
namespace vision {
namespace ops {
namespace {
// Copied from "UpSample.h" as we can not use UpSample.h with UpSample.cuh
static std::array<int64_t, 4> upsample_2d_common_check(
at::IntArrayRef input_size,
at::IntArrayRef output_size) {
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t nbatch = input_size[0];
int64_t channels = input_size[1];
int64_t input_height = input_size[2];
int64_t input_width = input_size[3];
TORCH_CHECK(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0,
"Input and output sizes should be greater than 0,"
" but got input (H: ",
input_height,
", W: ",
input_width,
") output (H: ",
output_height,
", W: ",
output_width,
")");
return {nbatch, channels, output_height, output_width};
}
template <int interp_size>
at::Tensor interpolate_gen2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
c10::optional<c10::ArrayRef<double>> scale_factors = {};
// Copied from UpSampleBilinear2d.cpp
auto output = at::empty({0}, input.options());
auto osize = at::native::upsample::compute_output_size(
input.sizes(), output_size, scale_factors);
auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0);
auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1);
auto full_output_size = upsample_2d_common_check(input.sizes(), osize);
// Allow for empty batch size but not other dimensions
TORCH_CHECK(
input.numel() != 0 ||
c10::multiply_integers(
input.sizes().begin() + 1, input.sizes().end()),
"Non-empty 4D data tensor expected but got a tensor with sizes ",
input.sizes());
output.resize_(full_output_size, input.suggest_memory_format());
at::native::internal_upsample::upsample_gen2d_out_cuda_template<interp_size>(
output,
input,
{full_output_size[2], full_output_size[3]},
align_corners,
scale_h,
scale_w);
return output;
}
at::Tensor interpolate_bilinear2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<2>(
input, output_size, align_corners);
}
at::Tensor interpolate_bicubic2d_aa_forward_kernel(
const at::Tensor& input,
at::IntArrayRef output_size,
bool align_corners) {
return interpolate_gen2d_aa_forward_kernel<4>(
input, output_size, align_corners);
}
} // namespace
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bilinear2d_aa"),
TORCH_FN(interpolate_bilinear2d_aa_forward_kernel));
m.impl(
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic2d_aa"),
TORCH_FN(interpolate_bicubic2d_aa_forward_kernel));
}
} // namespace ops
} // namespace vision
|
c953ab3878d86e3135eb8c1bf1c798e3df09dc9f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
void cudaHandleError( hipError_t err,const char *file,int line ) {
if (err != hipSuccess) {
printf( "CUDA Error\n%s in %s at line %d\n", hipGetErrorString( err ),file, line );
exit( EXIT_FAILURE );
}
}
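// Typical usage is through a macro so each call site reports its own file and
// line; the macro name below is only a suggestion, not part of this project.
#define HANDLE_CUDA_ERROR( call ) cudaHandleError( (call), __FILE__, __LINE__ )
// e.g. HANDLE_CUDA_ERROR( hipMalloc( (void**)&devPtr, numBytes ) );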
| c953ab3878d86e3135eb8c1bf1c798e3df09dc9f.cu | #include <stdio.h>
#include <stdlib.h>
void cudaHandleError( cudaError_t err,const char *file,int line ) {
if (err != cudaSuccess) {
printf( "CUDA Error\n%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
exit( EXIT_FAILURE );
}
}
|
2dfa9e0d8cbdd3b8ac679dad0516f7af4b6a13c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <cmath>
#include "ContourCUDA.h"
__global__ void countEdges(float *vertexes, int nRows, int nCols, int *numExpectedPoints, float level)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x; //found using the dimensions, based on the file given on the in-class materials page
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = y * gridDim.x * blockDim.x + x;
int bound = ((nRows * nCols) - 1) - nCols;
if (index <= bound && ((index + 1) % nCols != 0)) //check if row below top row, and column before last column
{
//local values to determine how many edges to expect from the grid
int count = 0;
int nAbove = 0;
int nBelow = 0;
//each point to check
float bottomL = vertexes[index];
float bottomR = vertexes[index + 1];
float topL = vertexes[index + nCols];
float topR = vertexes[index + nCols + 1];
//check if values are above or below the level, add accordingly
if (bottomL > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (bottomR > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (topL > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (topR > level)
{
nAbove++;
}
else
{
nBelow++;
}
//calculate number of expected edges based on how many vertices were below or above the desired level
if (nAbove == 3 && nBelow == 1)
{
count = 1;
}
else if (nAbove == 1 && nBelow == 3)
{
count = 1;
}
else if (nAbove == 2 && nBelow == 2)
{
count = 2;
}
else
{
count = 0;
}
atomicAdd(numExpectedPoints, count); //add to the number of expected edges total
}
}
__global__ void computeKernel(float *vertexes, int nRows, int nCols, int level, int *edgeCount, vec2 *actualEdgePoints, int *buf_location)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = y * gridDim.x * blockDim.x + x; //the index in the vertex array, acquired by multiplying the dimensions of each block and the threads within those blocks
int bound = ((nRows * nCols) - 1) - nCols; //we do not want to check the top row of the grid or the index at the farthest right
if (index <= bound && ((index % nCols) != nCols - 1)) //check if row below top row, and column before last column
{
//each point to check
float bottomL = vertexes[index];
float bottomR = vertexes[index + 1];
float topL = vertexes[index + nCols];
float topR = vertexes[index + nCols + 1];
int loc; //the location of our index in the actualEdgePoints array so that we do not overlap edge points
bool vertfound = false; //if we have found one vertex already
int count = 0; //the number of vertexes we have found so far pertaining to one edge
float x_coord = -1.0;
float y_coord = -1.0;
//check for missing data and return if found missing
if (bottomL == -9999 || bottomR == -9999 || topL == -9999 || topR == -9999)
{
return; //do not check
}
//check each side of the cell for a crossing: the bottom edge first, then the left vertical, the top edge, and finally the right vertical
if ((bottomL <= level && level <= bottomR) || (bottomL > level && level > bottomR)) //if the level is between the two points, not dependent on which corner is greater
{
if (bottomL <= level && level <= bottomR) //if the bottom right is greater
{
float f = (level - bottomL) / (bottomR - bottomL); //using the function given to find the coordinate between points
x_coord = (1.0 - f) * (index % nCols) + f * ((index + 1) % nCols); //use that percentage and attribute it to x and y values, depending on which part of the square we are checking
y_coord = (float)(index / nCols); //use the normal y coordinate
}
else if (bottomL > level && level > bottomR) //bottom left is greater, so the function is switched backwards
{
float f = (level - bottomR) / (bottomL - bottomR);
x_coord = (1.0 - f) * ((index + 1) % nCols) + f * (index % nCols);
y_coord = (float)(index / nCols);
}
if (!vertfound) //we have not found a vertex yet; this is the first point of our edge
{
loc = atomicAdd(buf_location, 2); //get the index to add this vertex coordinate set to the actualEdgePoint array
vertfound = true; //set to true so that we know we are on our second vertex of a certain edge
}
actualEdgePoints[loc + count][0] = x_coord; //set the coordinates of the vertex
actualEdgePoints[loc + count][1] = y_coord;
count++; //add to know how many vertices we have added so far
if (count == 2) //checks if we have completed our edge with 2 vertices, reset the edge count
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1); //add to the total number of edges that we have
}
}
//repeat
if ((bottomL <= level && level <= topL) || (bottomL > level && level > topL))
{
if (bottomL <= level && level <= topL)
{
float f = (level - bottomL) / (topL - bottomL);
x_coord = (float)(index % nCols);
y_coord = (1.0 - f) * (index / nCols) + f * ((index + nCols) / nCols);
}
else if (bottomL > level && level > topL)
{
float f = (level - topL) / (bottomL - topL);
x_coord = (float)(index % nCols);
y_coord = (1.0 - f) * ((index + nCols) / nCols) + f * (index / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
if ((topR <= level && level <= topL) || (topR > level && level > topL))
{
if (topR <= level && level <= topL)
{
float f = (level - topR) / (topL - topR);
x_coord = (1.0 - f) * ((index + nCols + 1) % nCols) + f * ((index + nCols) % nCols);
y_coord = (float)((index + nCols) / nCols);
}
else if (topR > level && level > topL)
{
float f = (level - topL) / (topR - topL);
x_coord = (1.0 - f) * ((index + nCols) % nCols) + f * ((index + nCols + 1) % nCols);
y_coord = (float)((index + nCols) / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
if ((topR <= level && level <= bottomR) || (topR > level && level > bottomR))
{
if (topR <= level && level <= bottomR)
{
float f = (level - topR) / (bottomR - topR);
x_coord = (float)((index + 1) % nCols);
y_coord = (1.0 - f) * ((index + nCols + 1) / nCols) + f * ((index + 1) / nCols);
}
else if (topR > level && level > bottomR)
{
float f = (level - bottomR) / (topR - bottomR);
x_coord = (float)((index + 1) % nCols);
y_coord = (1.0 - f) * ((index + 1) / nCols) + f * ((index + nCols + 1) / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
}
}
int expectedEdgesKernel(float *vertexes, int nRows, int nCols, float level)
{
float *dev_varray; //device vertex array buffer to copy
int vert_size = (nRows * nCols) * sizeof(float); //size of vertex array to copy to gpu
int *dev_count; //expected edge device count variable to copy to gpu
int zero = 0; //start the device count at 0
int *host_count = &zero; //host count to copy gpu value back to cpu
hipMalloc((void**)&dev_varray, vert_size); //allocate size to hold the vertex array in gpu
hipMalloc((void**)&dev_count, sizeof(int)); //allocate one int variable on gpu to hold edge count
hipMemcpy(dev_varray, vertexes, vert_size, hipMemcpyHostToDevice); //copy vertexValues to the gpu in dev_varray
hipMemcpy(dev_count, host_count, sizeof(int), hipMemcpyHostToDevice); //copy edge count to gpu starting at 0
dim3 block(16, 16); //placeholder size for blocks only optimized for warps
dim3 grid((nRows + block.x - 1) / block.x, (nCols + block.y - 1) / block.y); //launch grid based on size of vertexValues divided by block thread size
hipLaunchKernelGGL(( countEdges), dim3(grid), dim3(block), 0, 0, dev_varray, nRows, nCols, dev_count, level); //call kernel to count expected edges
hipDeviceSynchronize(); //barrier
hipMemcpy(host_count, dev_count, sizeof(int), hipMemcpyDeviceToHost); //copy device count back to host count to pass back
hipFree(dev_varray); //free gpu vertex array
hipFree(dev_count); //free device count
return *host_count;
}
int actualEdgesKernel(float *vertexes, int nRows, int nCols, float level, int numExpectedPoints, vec2 *buffer)
{
float *dev_varray; //device vertex array buffer to copy
int vert_size = (nRows * nCols) * sizeof(float); //size of vertex array to copy to gpu
int *dev_count; //actual edges device count variable to copy to gpu
int zero = 0; //start the device count at 0
int *host_count = &zero; //host count to copy gpu value back to cpu
int *buf_location; //index of the buffer that the coordinates should be placed at so edges are correct
hipMalloc(&dev_varray, vert_size); //allocate size to hold the vertex array in gpu
hipMalloc(&dev_count, sizeof(int)); //allocate one int variable on gpu to hold actual edge count
hipMalloc(&buf_location, sizeof(int)); //allocate index of buffer we are writing coordinates to
vec2 *dev_buffer; //allocate buffer to hold points of actual edges calculated
hipMalloc(&dev_buffer, 2 * numExpectedPoints * sizeof(float)); //two points for each edge calculated
hipMemcpy(dev_varray, vertexes, vert_size, hipMemcpyHostToDevice); //copy vertexValues to the gpu in dev_varray
hipMemcpy(dev_count, host_count, sizeof(int), hipMemcpyHostToDevice); //copy edge count to gpu starting at 0
hipMemcpy(buf_location, host_count, sizeof(int), hipMemcpyHostToDevice); //copy buffer index location to gpu
hipMemcpy(dev_buffer, buffer, 2 * numExpectedPoints * sizeof(float), hipMemcpyHostToDevice);
dim3 block(16, 16); //placeholder size for blocks only optimized for warps
dim3 grid((nRows + block.x - 1) / block.x, (nCols + block.y - 1) / block.y); //launch grid based on size of vertexValues divided by block thread size
hipLaunchKernelGGL(( computeKernel), dim3(grid), dim3(block), 0, 0, dev_varray, nRows, nCols, level, dev_count, dev_buffer, buf_location); //compute actual number of edges in vertex array
hipDeviceSynchronize(); //barrier
hipMemcpy(host_count, dev_count, sizeof(int), hipMemcpyDeviceToHost); //copy back actual number of edges calculated
hipMemcpy(buffer, dev_buffer, 2 * numExpectedPoints * sizeof(float), hipMemcpyDeviceToHost); //copy the actual edges from the gpu to the actual_edge_buffer on the cpu to then fill lines
hipFree(dev_varray); //free gpu vertex array
hipFree(dev_count); //free device count
hipFree(dev_buffer); //free gpu actual edge buffer
return *host_count;
}
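// A possible host-side flow combining the two wrappers above (illustrative
// only; it assumes vec2 is a pair of floats and that numExpectedPoints counts
// endpoints, i.e. two per expected edge, which matches how the device buffer
// is sized in actualEdgesKernel).
static vec2* runContourPass(float *vertexValues, int nRows, int nCols,
float level, int *nActualEdges)
{
// phase 1: count how many contour edges to expect at this level
int nExpectedEdges = expectedEdgesKernel(vertexValues, nRows, nCols, level);
// phase 2: allocate two endpoints per expected edge and fill them in
int nExpectedPoints = 2 * nExpectedEdges;
vec2 *points = new vec2[nExpectedPoints];
*nActualEdges = actualEdgesKernel(vertexValues, nRows, nCols, level,
nExpectedPoints, points);
return points; // caller owns the buffer; 2*(*nActualEdges) entries are filled
}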
| 2dfa9e0d8cbdd3b8ac679dad0516f7af4b6a13c1.cu | #include <stdio.h>
#include <iostream>
#include <cmath>
#include "ContourCUDA.h"
__global__ void countEdges(float *vertexes, int nRows, int nCols, int *numExpectedPoints, float level)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x; //found using the dimensions, based on the file given on the in-class materials page
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = y * gridDim.x * blockDim.x + x;
int bound = ((nRows * nCols) - 1) - nCols;
if (index <= bound && ((index + 1) % nCols != 0)) //check if row below top row, and column before last column
{
//local values to determine how many edges to expect from the grid
int count = 0;
int nAbove = 0;
int nBelow = 0;
//each point to check
float bottomL = vertexes[index];
float bottomR = vertexes[index + 1];
float topL = vertexes[index + nCols];
float topR = vertexes[index + nCols + 1];
//check if values are above or below the level, add accordingly
if (bottomL > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (bottomR > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (topL > level)
{
nAbove++;
}
else
{
nBelow++;
}
if (topR > level)
{
nAbove++;
}
else
{
nBelow++;
}
//calculate number of expected edges based on how many vertices were below or above the desired level
if (nAbove == 3 && nBelow == 1)
{
count = 1;
}
else if (nAbove == 1 && nBelow == 3)
{
count = 1;
}
else if (nAbove == 2 && nBelow == 2)
{
count = 2;
}
else
{
count = 0;
}
atomicAdd(numExpectedPoints, count); //add to the number of expected edges total
}
}
__global__ void computeKernel(float *vertexes, int nRows, int nCols, int level, int *edgeCount, vec2 *actualEdgePoints, int *buf_location)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = y * gridDim.x * blockDim.x + x; //the index in the vertex array, acquired by multiplying the dimensions of each block and the threads within those blocks
int bound = ((nRows * nCols) - 1) - nCols; //we do not want to check the top row of the grid or the index at the farthest right
if (index <= bound && ((index % nCols) != nCols - 1)) //check if row below top row, and column before last column
{
//each point to check
float bottomL = vertexes[index];
float bottomR = vertexes[index + 1];
float topL = vertexes[index + nCols];
float topR = vertexes[index + nCols + 1];
int loc; //the location of our index in the actualEdgePoints array so that we do not overlap edge points
bool vertfound = false; //if we have found one vertex already
int count = 0; //the number of vertexes we have found so far pertaining to one edge
float x_coord = -1.0;
float y_coord = -1.0;
//check for missing data and return if found missing
if (bottomL == -9999 || bottomR == -9999 || topL == -9999 || topR == -9999)
{
return; //do not check
}
//check each side of the cell for a crossing: the bottom edge first, then the left vertical, the top edge, and finally the right vertical
if ((bottomL <= level && level <= bottomR) || (bottomL > level && level > bottomR)) //if the level is between the two points, not dependent on which corner is greater
{
if (bottomL <= level && level <= bottomR) //if the bottom right is greater
{
float f = (level - bottomL) / (bottomR - bottomL); //using the function given to find the coordinate between points
x_coord = (1.0 - f) * (index % nCols) + f * ((index + 1) % nCols); //use that percentage and attribute it to x and y values, depending on which part of the square we are checking
y_coord = (float)(index / nCols); //use the normal y coordinate
}
else if (bottomL > level && level > bottomR) //bottom left is greater, so the function is switched backwards
{
float f = (level - bottomR) / (bottomL - bottomR);
x_coord = (1.0 - f) * ((index + 1) % nCols) + f * (index % nCols);
y_coord = (float)(index / nCols);
}
if (!vertfound) //we have not found a vertex yet; this is the first point of our edge
{
loc = atomicAdd(buf_location, 2); //get the index to add this vertex coordinate set to the actualEdgePoint array
vertfound = true; //set to true so that we know we are on our second vertex of a certain edge
}
actualEdgePoints[loc + count][0] = x_coord; //set the coordinates of the vertex
actualEdgePoints[loc + count][1] = y_coord;
count++; //add to know how many vertices we have added so far
if (count == 2) //checks if we have completed our edge with 2 vertices, reset the edge count
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1); //add to the total number of edges that we have
}
}
//repeat
if ((bottomL <= level && level <= topL) || (bottomL > level && level > topL))
{
if (bottomL <= level && level <= topL)
{
float f = (level - bottomL) / (topL - bottomL);
x_coord = (float)(index % nCols);
y_coord = (1.0 - f) * (index / nCols) + f * ((index + nCols) / nCols);
}
else if (bottomL > level && level > topL)
{
float f = (level - topL) / (bottomL - topL);
x_coord = (float)(index % nCols);
y_coord = (1.0 - f) * ((index + nCols) / nCols) + f * (index / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
if ((topR <= level && level <= topL) || (topR > level && level > topL))
{
if (topR <= level && level <= topL)
{
float f = (level - topR) / (topL - topR);
x_coord = (1.0 - f) * ((index + nCols + 1) % nCols) + f * ((index + nCols) % nCols);
y_coord = (float)((index + nCols) / nCols);
}
else if (topR > level && level > topL)
{
float f = (level - topL) / (topR - topL);
x_coord = (1.0 - f) * ((index + nCols) % nCols) + f * ((index + nCols + 1) % nCols);
y_coord = (float)((index + nCols) / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
if ((topR <= level && level <= bottomR) || (topR > level && level > bottomR))
{
if (topR <= level && level <= bottomR)
{
float f = (level - topR) / (bottomR - topR);
x_coord = (float)((index + 1) % nCols);
y_coord = (1.0 - f) * ((index + nCols + 1) / nCols) + f * ((index + 1) / nCols);
}
else if (topR > level && level > bottomR)
{
float f = (level - bottomR) / (topR - bottomR);
x_coord = (float)((index + 1) % nCols);
y_coord = (1.0 - f) * ((index + 1) / nCols) + f * ((index + nCols + 1) / nCols);
}
if (!vertfound)
{
loc = atomicAdd(buf_location, 2);
vertfound = true;
}
actualEdgePoints[loc + count][0] = x_coord;
actualEdgePoints[loc + count][1] = y_coord;
count++;
if (count == 2)
{
vertfound = 0;
count = 0;
atomicAdd(edgeCount, 1);
}
}
}
}
int expectedEdgesKernel(float *vertexes, int nRows, int nCols, float level)
{
float *dev_varray; //device vertex array buffer to copy
int vert_size = (nRows * nCols) * sizeof(float); //size of vertex array to copy to gpu
int *dev_count; //expected edge device count variable to copy to gpu
int zero = 0; //start the device count at 0
int *host_count = &zero; //host count to copy gpu value back to cpu
cudaMalloc((void**)&dev_varray, vert_size); //allocate size to hold the vertex array in gpu
cudaMalloc((void**)&dev_count, sizeof(int)); //allocate one int variable on gpu to hold edge count
cudaMemcpy(dev_varray, vertexes, vert_size, cudaMemcpyHostToDevice); //copy vertexValues to the gpu in dev_varray
cudaMemcpy(dev_count, host_count, sizeof(int), cudaMemcpyHostToDevice); //copy edge count to gpu starting at 0
dim3 block(16, 16); //placeholder size for blocks only optimized for warps
dim3 grid((nRows + block.x - 1) / block.x, (nCols + block.y - 1) / block.y); //launch grid based on size of vertexValues divided by block thread size
countEdges<<<grid, block>>>(dev_varray, nRows, nCols, dev_count, level); //call kernel to count expected edges
cudaThreadSynchronize(); //barrier
cudaMemcpy(host_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost); //copy device count back to host count to pass back
cudaFree(dev_varray); //free gpu vertex array
cudaFree(dev_count); //free device count
return *host_count;
}
int actualEdgesKernel(float *vertexes, int nRows, int nCols, float level, int numExpectedPoints, vec2 *buffer)
{
float *dev_varray; //device vertex array buffer to copy
int vert_size = (nRows * nCols) * sizeof(float); //size of vertex array to copy to gpu
int *dev_count; //actual edges device count variable to copy to gpu
int zero = 0; //start the device count at 0
int *host_count = &zero; //host count to copy gpu value back to cpu
int *buf_location; //index of the buffer that the coordinates should be placed at so edges are correct
cudaMalloc(&dev_varray, vert_size); //allocate size to hold the vertex array in gpu
cudaMalloc(&dev_count, sizeof(int)); //allocate one int variable on gpu to hold actual edge count
cudaMalloc(&buf_location, sizeof(int)); //allocate index of buffer we are writing coordinates to
vec2 *dev_buffer; //allocate buffer to hold points of actual edges calculated
cudaMalloc(&dev_buffer, 2 * numExpectedPoints * sizeof(float)); //two points for each edge calculated
cudaMemcpy(dev_varray, vertexes, vert_size, cudaMemcpyHostToDevice); //copy vertexValues to the gpu in dev_varray
cudaMemcpy(dev_count, host_count, sizeof(int), cudaMemcpyHostToDevice); //copy edge count to gpu starting at 0
cudaMemcpy(buf_location, host_count, sizeof(int), cudaMemcpyHostToDevice); //copy buffer index location to gpu
cudaMemcpy(dev_buffer, buffer, 2 * numExpectedPoints * sizeof(float), cudaMemcpyHostToDevice);
dim3 block(16, 16); //placeholder size for blocks only optimized for warps
dim3 grid((nRows + block.x - 1) / block.x, (nCols + block.y - 1) / block.y); //launch grid based on size of vertexValues divided by block thread size
computeKernel<<<grid, block>>>(dev_varray, nRows, nCols, level, dev_count, dev_buffer, buf_location); //compute actual number of edges in vertex array
cudaThreadSynchronize(); //barrier
cudaMemcpy(host_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost); //copy back actual number of edges calculated
cudaMemcpy(buffer, dev_buffer, 2 * numExpectedPoints * sizeof(float), cudaMemcpyDeviceToHost); //copy the actual edges from the gpu to the actual_edge_buffer on the cpu to then fill lines
cudaFree(dev_varray); //free gpu vertex array
cudaFree(dev_count); //free device count
cudaFree(dev_buffer); //free gpu actual edge buffer
return *host_count;
}
|
f68ab33ffb486f85b37b2281c375e52c0887dc86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// This source file is part of BehaveRT
// http://isis.dia.unisa.it/projects/behavert/
//
// Copyright (c) 2008-2010 ISISLab - University of Salerno
// Original author: Bernardino Frola <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ----------------
// Change log
//
// 03-10 bf: Created
//
// ----------------
#pragma once
#include "DeviceData.cuh"
#include "common_resources.cu"
#include "include\BehaviorClustering_kernel.cuh"
// Other plugIn dependencies
// Structures sharing
share_device_struct(BehaviorClusteringParams);
share_device_struct(BehaviorClusteringFields);
// Textures
texture<float, 1, hipReadModeElementType> featuresVectorTex;
texture<float, 1, hipReadModeElementType> neighSimilaritiesTex;
texture<float, 1, hipReadModeElementType> neighIndexesTex;
texture<float4, 1, hipReadModeElementType> similarityStatsTex;
// ----------------------------------------------------------------------
// Utility device functions
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float getLinearInterpolation(
float x,
float xa, float ya,
float xb, float yb)
{
return
ya * ((x - xb) / (xa - xb)) +
yb * ((x - xa) / (xb - xa));
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float interpolateSimilarity(float val, float min, float avg, float max)
{
// Linear interpolation
if (val > avg)
{
// Clamp for precision related errors
return clamp(getLinearInterpolation(val, avg, 0.5f, max, 1.0f),
0.0f, 1.0f);
}
return clamp(getLinearInterpolation(val, min, 0.0f, avg, 0.5f),
0.0f, 1.0f);
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
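// Blends the new similarity sample into the running min/avg/max statistics
// stored in oldStats.x/.y/.z, using the blending rates in similarityStatsParams.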
__device__ float4 updateSimilarityStats(float similarity, float4 oldStats)
{
float minVal = oldStats.x;
float avgVal = oldStats.y;
float maxVal = oldStats.z;
// --------------------------------------
float valueToBlendInAvg =
similarity *
dBehaviorClusteringParams.avgSimilarityController +
((maxVal + minVal) / 2.0f) *
(1 - dBehaviorClusteringParams.avgSimilarityController);
avgVal +=
(valueToBlendInAvg - oldStats.y) *
dBehaviorClusteringParams.similarityStatsParams.x;
avgVal = clamp(avgVal, 0.0f, 1.0f);
// --------------------------------------
if (similarity < minVal)
{
minVal += (similarity - minVal) *
dBehaviorClusteringParams.similarityStatsParams.y;
// Bind to avgVal
if (minVal > avgVal)
minVal = clamp(minVal, 0.0f, similarity);
}
else if (minVal < avgVal)
minVal += dBehaviorClusteringParams.similarityStatsParams.z; // "Memory"
// --------------------------------------
if (similarity > maxVal)
{
maxVal += (similarity - maxVal) *
dBehaviorClusteringParams.similarityStatsParams.y;
// Bind to avgVal
if (maxVal < avgVal)
maxVal = clamp(maxVal, similarity, 1.0f);
}
else if (maxVal > avgVal)
maxVal -= dBehaviorClusteringParams.similarityStatsParams.z; // "Memory"
// --------------------------------------
oldStats.x = minVal;
oldStats.y = avgVal;
oldStats.z = maxVal;
return oldStats;
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Device functions
// Compute the cosine similarity between two feature
// vectors of length dBehaviorClusteringParams.featuresCount, remapped to [0..1]
__device__ float computeSimilarity(
float* feturesVectorRef1,
float* feturesVectorRef2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = feturesVectorRef1[k];
float featureValue2 = feturesVectorRef2[k];
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float computeSimilarityFromTex(
int individualIndex1, int individualIndex2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
individualIndex1 *= dBehaviorClusteringParams.featuresCount;
individualIndex2 *= dBehaviorClusteringParams.featuresCount;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = FETCH(featuresVector, individualIndex1 + k);
float featureValue2 = FETCH(featuresVector, individualIndex2 + k);
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float computeSimilarityFromShrdTex(
float* feturesVectorShrdRef1,
int individualIndex2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
individualIndex2 *= dBehaviorClusteringParams.featuresCount;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = feturesVectorShrdRef1[k];
float featureValue2 = FETCH(featuresVector, individualIndex2 + k);
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
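// Note: this helper assigns to its by-value pointer parameter, so the caller
// never observes the result; the BEHAVIORCLUSTERING_PREPARENEIGHLISTS macro
// below is what the kernels actually use.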
__device__ void getNeighborsSimilarities(int index, float* neighSimilarities)
{
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities);
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1);
neighSimilarities = neighSimilaritiesList + neighIndexBase;
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Declare and init neigh lists and similarities
#define BEHAVIORCLUSTERING_PREPARENEIGHLISTS \
uint neighNum; \
uint neighList[Proximity3d_MAX_NEIGHBORS]; \
Proximity3D::getNeighborsList(index, neighNum, neighList); \
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities); \
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1); \
float* neighSimilarities = neighSimilaritiesList + neighIndexBase;
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__global__ void BehaviorClustering_steerForClusterCohesion()
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint sortedIndex = FETCH(agentHash, index).y;
if (sortedIndex > dBehaviorClusteringParams.elementsNumber)
return;
// Vars
float3 myPos = make_float3(tex1Dfetch(oldPosTex, index));
float3 myForward = make_float3(tex1Dfetch(oldForwardTex, index));
float mySpeed = tex1Dfetch(oldForwardTex, index).w;
float3 steering = make_float3(0, 0, 0);
// Macro
BEHAVIORCLUSTERING_PREPARENEIGHLISTS
/*uint neighNum;
uint neighList[Proximity3d_MAX_NEIGHBORS];
//float neighSimilarities[Proximity3d_MAX_NEIGHBORS];
Proximity3D::getNeighborsList(index, neighNum, neighList);
//getNeighborsSimilarities(index, neighSimilarities);
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities);
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1);
float* neighSimilarities = neighSimilaritiesList + neighIndexBase;
*/
// The neighbor list does not contain the individual that is executing the behavior
for (int i = 0; i < neighNum; i ++)
{
// Get indexes and similarities of neighbors
uint otherIndex = neighList[i];
float similarity = neighSimilarities[i];
//float similarity = 1.0f;
// --------------------------------
// DEBUG
//uint otherSortedIndex = FETCH(agentHash, otherIndex).y;
//if (sortedIndex == dBehaviorClusteringParams.debugIndex)
// cuPrintf("C %d-%d) %f\n", sortedIndex, otherSortedIndex, similarity);
// --------------------------------
float3 otherPos = make_float3((float4)FETCH(oldPos, otherIndex));
// Calculate perpendicular forces
float3 seekForce =
OpenSteerWrapper::xxxsteerForSeek(
myPos, mySpeed, myForward, otherPos,
dOpenSteerWrapperParams.commonMaxSpeed);
float3 fleeForce =
OpenSteerWrapper::xxxsteerForFlee(
myPos, mySpeed, myForward, otherPos,
dOpenSteerWrapperParams.commonMaxSpeed);
// Combine the forces using the similarity value: similar neighbors attract (seek), dissimilar ones repel (flee)
steering +=
seekForce * similarity +
fleeForce * (1.0f - similarity);
}
// Normalize and add a force weight
if (neighNum > 0)
{
steering = normalize(steering / (float)neighNum) *
dBehaviorClusteringParams.clusteringForceParams.x;
}
OpenSteerWrapper::blendIntoSteeringForce(index, steering);
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__global__ void BehaviorClustering_steerForClusterAlignment()
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint sortedIndex = FETCH(agentHash, index).y;
if (sortedIndex > dBehaviorClusteringParams.elementsNumber)
return;
// Var init
float3 myForward = make_float3(tex1Dfetch(oldForwardTex, index));
float3 steering = make_float3(0, 0, 0);
// Macro
BEHAVIORCLUSTERING_PREPARENEIGHLISTS
// The neighbor list does not contain the individual that is executing the behavior
for (int i = 0; i < neighNum; i ++)
{
// Get indexes and similarities of neighbors
uint otherIndex = neighList[i];
float similarity = neighSimilarities[i];
float3 otherForward = make_float3(tex1Dfetch(oldForwardTex, otherIndex));
// Accumulate the neighbor's heading weighted by its similarity
steering += otherForward * similarity;
}
// Normalize and add a force weight
if (neighNum > 0)
{
steering = normalize((steering / (float)neighNum) - myForward) *
dBehaviorClusteringParams.alignmentClusteringForceParams.x;
}
OpenSteerWrapper::blendIntoSteeringForce(index, steering);
}
| f68ab33ffb486f85b37b2281c375e52c0887dc86.cu | // ----------------------------------------------------------------------------
// This source file is part of BehaveRT
// http://isis.dia.unisa.it/projects/behavert/
//
// Copyright (c) 2008-2010 ISISLab - University of Salerno
// Original author: Bernardino Frola <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ----------------
// Change log
//
// 03-10 bf: Created
//
// ----------------
#pragma once
#include "DeviceData.cuh"
#include "common_resources.cu"
#include "include\BehaviorClustering_kernel.cuh"
// Other plugIn dependencies
// Structures sharing
share_device_struct(BehaviorClusteringParams);
share_device_struct(BehaviorClusteringFields);
// Textures
texture<float, 1, cudaReadModeElementType> featuresVectorTex;
texture<float, 1, cudaReadModeElementType> neighSimilaritiesTex;
texture<float, 1, cudaReadModeElementType> neighIndexesTex;
texture<float4, 1, cudaReadModeElementType> similarityStatsTex;
// ----------------------------------------------------------------------
// Utility device functions
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float getLinearInterpolation(
float x,
float xa, float ya,
float xb, float yb)
{
return
ya * ((x - xb) / (xa - xb)) +
yb * ((x - xa) / (xb - xa));
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float interpolateSimilarity(float val, float min, float avg, float max)
{
// Linear interpolation
if (val > avg)
{
// Clamp for precision related errors
return clamp(getLinearInterpolation(val, avg, 0.5f, max, 1.0f),
0.0f, 1.0f);
}
return clamp(getLinearInterpolation(val, min, 0.0f, avg, 0.5f),
0.0f, 1.0f);
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
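// Blends the new similarity sample into the running min/avg/max statistics
// stored in oldStats.x/.y/.z, using the blending rates in similarityStatsParams.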
__device__ float4 updateSimilarityStats(float similarity, float4 oldStats)
{
float minVal = oldStats.x;
float avgVal = oldStats.y;
float maxVal = oldStats.z;
// --------------------------------------
float valueToBlendInAvg =
similarity *
dBehaviorClusteringParams.avgSimilarityController +
((maxVal + minVal) / 2.0f) *
(1 - dBehaviorClusteringParams.avgSimilarityController);
avgVal +=
(valueToBlendInAvg - oldStats.y) *
dBehaviorClusteringParams.similarityStatsParams.x;
avgVal = clamp(avgVal, 0.0f, 1.0f);
// --------------------------------------
if (similarity < minVal)
{
minVal += (similarity - minVal) *
dBehaviorClusteringParams.similarityStatsParams.y;
// Bind to avgVal
if (minVal > avgVal)
minVal = clamp(minVal, 0.0f, similarity);
}
else if (minVal < avgVal)
minVal += dBehaviorClusteringParams.similarityStatsParams.z; // "Memory"
// --------------------------------------
if (similarity > maxVal)
{
maxVal += (similarity - maxVal) *
dBehaviorClusteringParams.similarityStatsParams.y;
// Bind to avgVal
if (maxVal < avgVal)
maxVal = clamp(maxVal, similarity, 1.0f);
}
else if (maxVal > avgVal)
maxVal -= dBehaviorClusteringParams.similarityStatsParams.z; // "Memory"
// --------------------------------------
oldStats.x = minVal;
oldStats.y = avgVal;
oldStats.z = maxVal;
return oldStats;
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Device functions
// Compute the cosine similarity between two feature
// vectors of length dBehaviorClusteringParams.featuresCount, remapped to [0..1]
__device__ float computeSimilarity(
float* feturesVectorRef1,
float* feturesVectorRef2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = feturesVectorRef1[k];
float featureValue2 = feturesVectorRef2[k];
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float computeSimilarityFromTex(
int individualIndex1, int individualIndex2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
individualIndex1 *= dBehaviorClusteringParams.featuresCount;
individualIndex2 *= dBehaviorClusteringParams.featuresCount;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = FETCH(featuresVector, individualIndex1 + k);
float featureValue2 = FETCH(featuresVector, individualIndex2 + k);
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__device__ float computeSimilarityFromShrdTex(
float* feturesVectorShrdRef1,
int individualIndex2)
{
float norma1 = 0.0;
float norma2 = 0.0;
float dot_scalar = 0.0;
individualIndex2 *= dBehaviorClusteringParams.featuresCount;
for(int k = 0; k < dBehaviorClusteringParams.featuresCount; k ++)
{
float featureValue1 = feturesVectorShrdRef1[k];
float featureValue2 = FETCH(featuresVector, individualIndex2 + k);
// Calculate the dot product
dot_scalar += (featureValue1 * featureValue2);
norma1 += (featureValue1 * featureValue1);
norma2 += (featureValue2 * featureValue2);
}
norma1 = sqrt(norma1);
norma2 = sqrt(norma2);
float similarity = dot_scalar /(norma1*norma2); // Similarity [-1..1]
return (similarity + 1.0f) / 2.0f; // Get a value [0..1]
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
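// Note: this helper assigns to its by-value pointer parameter, so the caller
// never observes the result; the BEHAVIORCLUSTERING_PREPARENEIGHLISTS macro
// below is what the kernels actually use.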
__device__ void getNeighborsSimilarities(int index, float* neighSimilarities)
{
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities);
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1);
neighSimilarities = neighSimilaritiesList + neighIndexBase;
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Declare and init neigh lists and similarities
#define BEHAVIORCLUSTERING_PREPARENEIGHLISTS \
uint neighNum; \
uint neighList[Proximity3d_MAX_NEIGHBORS]; \
Proximity3D::getNeighborsList(index, neighNum, neighList); \
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities); \
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1); \
float* neighSimilarities = neighSimilaritiesList + neighIndexBase;
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__global__ void BehaviorClustering_steerForClusterCohesion()
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint sortedIndex = FETCH(agentHash, index).y;
if (sortedIndex > dBehaviorClusteringParams.elementsNumber)
return;
// Vars
float3 myPos = make_float3(tex1Dfetch(oldPosTex, index));
float3 myForward = make_float3(tex1Dfetch(oldForwardTex, index));
float mySpeed = tex1Dfetch(oldForwardTex, index).w;
float3 steering = make_float3(0, 0, 0);
// Macro
BEHAVIORCLUSTERING_PREPARENEIGHLISTS
/*uint neighNum;
uint neighList[Proximity3d_MAX_NEIGHBORS];
//float neighSimilarities[Proximity3d_MAX_NEIGHBORS];
Proximity3D::getNeighborsList(index, neighNum, neighList);
//getNeighborsSimilarities(index, neighSimilarities);
declare_input(neighSimilaritiesList, float, dBehaviorClusteringFields.neighSimilarities);
const int neighIndexBase = __mul24(index, dProximity3DParams.maxNeighbors + 1);
float* neighSimilarities = neighSimilaritiesList + neighIndexBase;
*/
// The neighbor list does not contain the individual that is executing the behavior
for (int i = 0; i < neighNum; i ++)
{
// Get indexes and similarities of neighbors
uint otherIndex = neighList[i];
float similarity = neighSimilarities[i];
//float similarity = 1.0f;
// --------------------------------
// DEBUG
//uint otherSortedIndex = FETCH(agentHash, otherIndex).y;
//if (sortedIndex == dBehaviorClusteringParams.debugIndex)
// cuPrintf("C %d-%d) %f\n", sortedIndex, otherSortedIndex, similarity);
// --------------------------------
float3 otherPos = make_float3((float4)FETCH(oldPos, otherIndex));
// Calculate perpendicular forces
float3 seekForce =
OpenSteerWrapper::xxxsteerForSeek(
myPos, mySpeed, myForward, otherPos,
dOpenSteerWrapperParams.commonMaxSpeed);
float3 fleeForce =
OpenSteerWrapper::xxxsteerForFlee(
myPos, mySpeed, myForward, otherPos,
dOpenSteerWrapperParams.commonMaxSpeed);
// Combine the forces using the similarity value: similar neighbors attract (seek), dissimilar ones repel (flee)
steering +=
seekForce * similarity +
fleeForce * (1.0f - similarity);
}
// Normalize and add a force weight
if (neighNum > 0)
{
steering = normalize(steering / (float)neighNum) *
dBehaviorClusteringParams.clusteringForceParams.x;
}
OpenSteerWrapper::blendIntoSteeringForce(index, steering);
}
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
__global__ void BehaviorClustering_steerForClusterAlignment()
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint sortedIndex = FETCH(agentHash, index).y;
if (sortedIndex > dBehaviorClusteringParams.elementsNumber)
return;
// Var init
float3 myForward = make_float3(tex1Dfetch(oldForwardTex, index));
float3 steering = make_float3(0, 0, 0);
// Macro
BEHAVIORCLUSTERING_PREPARENEIGHLISTS
// The neighbor list does not contain the individual that is executing the behavior
for (int i = 0; i < neighNum; i ++)
{
// Get indexes and similarities of neighbors
uint otherIndex = neighList[i];
float similarity = neighSimilarities[i];
float3 otherForward = make_float3(tex1Dfetch(oldForwardTex, otherIndex));
// Calc the similarity alignment
steering += otherForward * similarity;
}
// Normalize and add a force weight
if (neighNum > 0)
{
steering = normalize((steering / (float)neighNum) - myForward) *
dBehaviorClusteringParams.alignmentClusteringForceParams.x;
}
OpenSteerWrapper::blendIntoSteeringForce(index, steering);
}
|
85d590be116be7000cc6754d6227e3440fd39d80.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include "stdio.h"
#include <hip/hip_runtime.h>
//#include <cudaGL.h>
using namespace std;
const int DIM = 16;
texture<unsigned char,2,hipReadModeNormalizedFloat> texIn;
__global__ void fun(unsigned char * in, float * out)
{
int x = threadIdx.x;
int y = threadIdx.y;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
//out[tid] = tex2D(texIn,x,y) + in[tid];
out[tid] = tex2D(texIn,x,y) + in[tid] + x * 10000 + y * 1000000;
}
int main()
{
//memory in kernel
unsigned char * inGlobe;
float * outGlobe;
//memory in host
unsigned char tmp1[DIM][DIM];
float tmp[DIM][DIM];
dim3 threads(DIM,DIM);
hipMalloc((void**)&inGlobe,DIM * DIM );
hipMalloc((void**)&outGlobe,DIM * DIM * sizeof (float));
//hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
hipChannelFormatDesc desc = hipCreateChannelDesc(16, 16, 0, 0, hipChannelFormatKindUnsigned);
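//note: this desc declares 16-bit x/y channels while texIn is an unsigned char texture;
//the commented-out hipCreateChannelDesc<unsigned char>() above (8 bits in x) would match it,
//and the last argument of hipBindTexture2D below is the row pitch in bytes, i.e. DIM * sizeof(unsigned char) for this layout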
hipBindTexture2D( NULL, texIn,inGlobe,
desc, DIM, DIM,
DIM * DIM * sizeof (unsigned char) );
//root cause: there is an error here, we need DIM * DIM * sizeof(char)
for (int i = 0; i < DIM; ++i)
for (int j = 0; j< DIM; ++j)
tmp1[i][j] = i*16 + j;
hipMemcpy( inGlobe, tmp1, DIM * DIM * sizeof(unsigned char), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( fun), dim3(1),dim3(threads), 0, 0, inGlobe,outGlobe);
hipDeviceSynchronize();
hipMemcpy(tmp,outGlobe, DIM *DIM * sizeof(float), hipMemcpyDeviceToHost );
hipDeviceSynchronize();
for (int i = 0; i < DIM; ++i)
{
for (int j = 0; j< DIM; ++j)
{
printf("%f ", tmp[i][j]);
}
cout << endl;
}
int k;
cin >> k;
return 0;
}
#include <hip/hip_runtime.h>
#include <iostream>
#include "stdio.h"
#include <hip/hip_runtime.h>
using namespace std;
const int DIM = 8;
texture<unsigned char,2,hipReadModeNormalizedFloat> texIn;
__global__ void fun(unsigned char * in, float * out)
{
int x = threadIdx.x;
int y = threadIdx.y;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
printf(" %d %d %d %d %f \n ",x,y,tid, in[tid],tex2D(texIn,x,y));
//out[tid] = tex2D(texIn,x,y) + in[tid];
//out[tid] = tex2D(texIn,x,y) + in[tid] + x * 10000 + y * 1000000;
}
int main()
{
//memory in kernel
unsigned char * inGlobe;
float * outGlobe;
//memory in host
unsigned char tmp1[DIM][DIM];
float tmp[DIM][DIM];
dim3 threads(DIM,DIM);
hipMalloc((void**)&inGlobe,DIM * DIM );
hipMalloc((void**)&outGlobe,DIM * DIM * sizeof (float));
//hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
hipChannelFormatDesc desc = hipCreateChannelDesc(DIM, DIM, 0, 0, hipChannelFormatKindUnsigned);
hipBindTexture2D( NULL, texIn,inGlobe,
desc, DIM, DIM,
sizeof (unsigned char) );
//root cause: there is an error here, we need DIM * DIM * sizeof(char)
for (int i = 0; i < DIM; ++i)
for (int j = 0; j< DIM; ++j)
tmp1[i][j] = i * DIM + j ;
hipMemcpy( inGlobe, tmp1, DIM * DIM * sizeof(unsigned char), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( fun), dim3(1),dim3(threads), 0, 0, inGlobe,outGlobe);
//hipDeviceSynchronize();
hipMemcpy(tmp,outGlobe, DIM *DIM * sizeof(float), hipMemcpyDeviceToHost );
hipDeviceSynchronize();
/*
for (int i = 0; i < DIM; ++i)
{
for (int j = 0; j< DIM; ++j)
{
printf("%f ", tmp[i][j]);
}
cout << endl;
}
*/
int k;
cin >> k;
hipFree(inGlobe);
hipFree(outGlobe);
return 0;
}
| 85d590be116be7000cc6754d6227e3440fd39d80.cu | #include <cuda_runtime.h>
#include <iostream>
#include "stdio.h"
#include <cuda.h>
//#include <cudaGL.h>
using namespace std;
const int DIM = 16;
texture<unsigned char,2,cudaReadModeNormalizedFloat> texIn;
__global__ void fun(unsigned char * in, float * out)
{
int x = threadIdx.x;
int y = threadIdx.y;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
//out[tid] = tex2D(texIn,x,y) + in[tid];
out[tid] = tex2D(texIn,x,y) + in[tid] + x * 10000 + y * 1000000;
}
int main()
{
//memory in kernel
unsigned char * inGlobe;
float * outGlobe;
//memory in host
unsigned char tmp1[DIM][DIM];
float tmp[DIM][DIM];
dim3 threads(DIM,DIM);
cudaMalloc((void**)&inGlobe,DIM * DIM );
cudaMalloc((void**)&outGlobe,DIM * DIM * sizeof (float));
//cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
cudaChannelFormatDesc desc = cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsigned);
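//note: this desc declares 16-bit x/y channels while texIn is an unsigned char texture;
//the commented-out cudaCreateChannelDesc<unsigned char>() above (8 bits in x) would match it,
//and the last argument of cudaBindTexture2D below is the row pitch in bytes, i.e. DIM * sizeof(unsigned char) for this layout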
cudaBindTexture2D( NULL, texIn,inGlobe,
desc, DIM, DIM,
DIM * DIM * sizeof (unsigned char) );
//root cause: there is an error here, we need DIM * DIM * sizeof(char)
for (int i = 0; i < DIM; ++i)
for (int j = 0; j< DIM; ++j)
tmp1[i][j] = i*16 + j;
cudaMemcpy( inGlobe, tmp1, DIM * DIM * sizeof(unsigned char), cudaMemcpyHostToDevice );
fun<<<1,threads>>>(inGlobe,outGlobe);
cudaThreadSynchronize();
cudaMemcpy(tmp,outGlobe, DIM *DIM * sizeof(float), cudaMemcpyDeviceToHost );
cudaThreadSynchronize();
for (int i = 0; i < DIM; ++i)
{
for (int j = 0; j< DIM; ++j)
{
printf("%f ", tmp[i][j]);
}
cout << endl;
}
int k;
cin >> k;
return 0;
}
#include <cuda_runtime.h>
#include <iostream>
#include "stdio.h"
#include <cuda.h>
using namespace std;
const int DIM = 8;
texture<unsigned char,2,cudaReadModeNormalizedFloat> texIn;
__global__ void fun(unsigned char * in, float * out)
{
int x = threadIdx.x;
int y = threadIdx.y;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
printf(" %d %d %d %d %f \n ",x,y,tid, in[tid],tex2D(texIn,x,y));
//out[tid] = tex2D(texIn,x,y) + in[tid];
//out[tid] = tex2D(texIn,x,y) + in[tid] + x * 10000 + y * 1000000;
}
int main()
{
//memory in kernel
unsigned char * inGlobe;
float * outGlobe;
//memory in host
unsigned char tmp1[DIM][DIM];
float tmp[DIM][DIM];
dim3 threads(DIM,DIM);
cudaMalloc((void**)&inGlobe,DIM * DIM );
cudaMalloc((void**)&outGlobe,DIM * DIM * sizeof (float));
//cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
cudaChannelFormatDesc desc = cudaCreateChannelDesc(DIM, DIM, 0, 0, cudaChannelFormatKindUnsigned);
cudaBindTexture2D( NULL, texIn,inGlobe,
desc, DIM, DIM,
sizeof (unsigned char) );
//root cause: there is an error here, we need DIM * DIM * sizeof(char)
for (int i = 0; i < DIM; ++i)
for (int j = 0; j< DIM; ++j)
tmp1[i][j] = i * DIM + j ;
cudaMemcpy( inGlobe, tmp1, DIM * DIM * sizeof(unsigned char), cudaMemcpyHostToDevice );
fun<<<1,threads>>>(inGlobe,outGlobe);
//cudaThreadSynchronize();
cudaMemcpy(tmp,outGlobe, DIM *DIM * sizeof(float), cudaMemcpyDeviceToHost );
cudaThreadSynchronize();
/*
for (int i = 0; i < DIM; ++i)
{
for (int j = 0; j< DIM; ++j)
{
printf("%f ", tmp[i][j]);
}
cout << endl;
}
*/
int k;
cin >> k;
cudaFree(inGlobe);
cudaFree(outGlobe);
return 0;
}
|
abf6179d29ce536fb822a4442ec90ebb846cd34f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void execShuffleKernel(void **vdX, Nd4jLong **dxShapeInfo,
void **vdZ,
int N,
int *shuffleMap,
Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) {
// we assume that the shuffle map for each X contains its pair TAD Y
auto dX = reinterpret_cast<T **>(vdX);
auto dZ = reinterpret_cast<T **>(vdZ);
__shared__ int tadLength;
__shared__ int xRank;
__shared__ int tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong* xShapeInfo;
__shared__ Nd4jLong xLength;
for (int f = 0; f < N; f++) {
auto x = reinterpret_cast<T *>(dX[f]);
auto z = reinterpret_cast<T *>(dZ[f]);
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo[f]);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]);
xShapeInfo = dxShapeInfo[f];
xRank = shape::rank(xShapeInfo);
xLength = shape::length(xShapeInfo);
numTads = xLength / tadLength;
}
__syncthreads();
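// Rank-1 inputs are shuffled element-wise in place; higher-rank inputs exchange
// whole TADs according to shuffleMap.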
if (xRank == 1) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int r = tid; r < xLength; r += gridDim.x * blockDim.x) {
auto swapIndex = shuffleMap[r];
if (swapIndex >= 0 && swapIndex < xLength) {
int idx = r * tadEWS;
int swap = swapIndex * tadEWS;
T oldX = x[idx];
x[idx] = x[swap];
x[swap] = oldX;
}
}
} else {
// we roll over the pairs of TADs, thus limit is numTads / 2
for (uint r = blockIdx.x; r < numTads; r += gridDim.x) {
if (shuffleMap[r] >= 0) {
auto oldOffset = tadOffsets[f][r];
auto newOffset = tadOffsets[f][shuffleMap[r]];
auto rX = x + oldOffset;
auto rY = x + newOffset;
auto zX = z + oldOffset;
auto zY = z + newOffset;
// so we're going to change TAD[oldOffset] with TAD[newOffset]
if (tadEWS == 1) {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
T oldX = rX[i];
rX[i] = rY[i];
zY[i] = oldX;
}
} else {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo[f]);
auto yOffset = newOffset + xOffset;
xOffset += oldOffset;
T oldX = x[xOffset];
z[xOffset] = x[yOffset];
z[yOffset] = oldX;
}
}
}
}
}
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void shuffleKernelGeneric(dim3 &launchDims, hipStream_t *stream,
void **vdX, Nd4jLong **xShapeInfo,
void **vdZ,
int N,
int *shuffleMap,
Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) {
hipLaunchKernelGGL(( execShuffleKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "shuffleGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT shuffleKernelGeneric, (dim3 & launchDims, hipStream_t * stream, void * *vdX, Nd4jLong * *xShapeInfo, void **vdZ, int N, int * shuffleMap, Nd4jLong * *tadOnlyShapeInfo, Nd4jLong * *tadOffsets), LIBND4J_TYPES);
} | abf6179d29ce536fb822a4442ec90ebb846cd34f.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void execShuffleKernel(void **vdX, Nd4jLong **dxShapeInfo,
void **vdZ,
int N,
int *shuffleMap,
Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) {
// we assume that the shuffle map for each X contains its pair TAD Y
auto dX = reinterpret_cast<T **>(vdX);
auto dZ = reinterpret_cast<T **>(vdZ);
__shared__ int tadLength;
__shared__ int xRank;
__shared__ int tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong* xShapeInfo;
__shared__ Nd4jLong xLength;
for (int f = 0; f < N; f++) {
auto x = reinterpret_cast<T *>(dX[f]);
auto z = reinterpret_cast<T *>(dZ[f]);
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo[f]);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]);
xShapeInfo = dxShapeInfo[f];
xRank = shape::rank(xShapeInfo);
xLength = shape::length(xShapeInfo);
numTads = xLength / tadLength;
}
__syncthreads();
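// Rank-1 inputs are shuffled element-wise in place; higher-rank inputs exchange
// whole TADs according to shuffleMap.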
if (xRank == 1) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int r = tid; r < xLength; r += gridDim.x * blockDim.x) {
auto swapIndex = shuffleMap[r];
if (swapIndex >= 0 && swapIndex < xLength) {
int idx = r * tadEWS;
int swap = swapIndex * tadEWS;
T oldX = x[idx];
x[idx] = x[swap];
x[swap] = oldX;
}
}
} else {
// we roll over the pairs of TADs, thus limit is numTads / 2
for (uint r = blockIdx.x; r < numTads; r += gridDim.x) {
if (shuffleMap[r] >= 0) {
auto oldOffset = tadOffsets[f][r];
auto newOffset = tadOffsets[f][shuffleMap[r]];
auto rX = x + oldOffset;
auto rY = x + newOffset;
auto zX = z + oldOffset;
auto zY = z + newOffset;
// so we're going to change TAD[oldOffset] with TAD[newOffset]
if (tadEWS == 1) {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
T oldX = rX[i];
rX[i] = rY[i];
zY[i] = oldX;
}
} else {
for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo[f]);
auto yOffset = newOffset + xOffset;
xOffset += oldOffset;
T oldX = x[xOffset];
z[xOffset] = x[yOffset];
z[yOffset] = oldX;
}
}
}
}
}
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void shuffleKernelGeneric(dim3 &launchDims, cudaStream_t *stream,
void **vdX, Nd4jLong **xShapeInfo,
void **vdZ,
int N,
int *shuffleMap,
Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) {
execShuffleKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "shuffleGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT shuffleKernelGeneric, (dim3 & launchDims, cudaStream_t * stream, void * *vdX, Nd4jLong * *xShapeInfo, void **vdZ, int N, int * shuffleMap, Nd4jLong * *tadOnlyShapeInfo, Nd4jLong * *tadOffsets), LIBND4J_TYPES);
} |
bfa26a0da5d7210f882c581e48428222bd3be540.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/adagrad.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void adagrad_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType eps,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ cache,
size_t cache_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
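// Grid-stride loop over all matrix entries; each element gets the AdaGrad update
// cache += g^2; w -= learning_rate * g / (sqrt(cache) + eps)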
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
auto& x = values[row + col * values_ldim];
const auto& g = gradient[row + col * gradient_ldim];
auto& c = cache[row + col * cache_ldim];
c += g * g;
x -= learning_rate * g / (cuda::sqrt(c) + eps);
}
}
} // namespace
template <typename TensorDataType>
void adagrad<TensorDataType>::step_compute_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size > 0) {
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto&& stream = hydrogen::cuda::GetDefaultStream();
hipLaunchKernelGGL(( adagrad_kernel<TensorDataType>), dim3(grid_size), dim3(block_size), 0, stream,
local_height, local_width,
this->get_learning_rate(), m_eps,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_cache->Buffer(), m_cache->LDim());
}
}
#ifdef LBANN_HAS_HALF
template <>
void adagrad<cpu_fp16>::step_compute_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void adagrad<T>::step_compute_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| bfa26a0da5d7210f882c581e48428222bd3be540.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/adagrad.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void adagrad_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType eps,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ cache,
size_t cache_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
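// Grid-stride loop over all matrix entries; each element gets the AdaGrad update
// cache += g^2; w -= learning_rate * g / (sqrt(cache) + eps)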
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
auto& x = values[row + col * values_ldim];
const auto& g = gradient[row + col * gradient_ldim];
auto& c = cache[row + col * cache_ldim];
c += g * g;
x -= learning_rate * g / (cuda::sqrt(c) + eps);
}
}
} // namespace
template <typename TensorDataType>
void adagrad<TensorDataType>::step_compute_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size > 0) {
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto&& stream = hydrogen::cuda::GetDefaultStream();
adagrad_kernel<TensorDataType><<<grid_size, block_size, 0, stream>>>(
local_height, local_width,
this->get_learning_rate(), m_eps,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_cache->Buffer(), m_cache->LDim());
}
}
#ifdef LBANN_HAS_HALF
template <>
void adagrad<cpu_fp16>::step_compute_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void adagrad<T>::step_compute_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
7ba42325846665047f8cd9e1902d2591709a5f13.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
for (int i = 0; i < top.size(); ++i) {
const Blob<Dtype>* blob = batch->blob(i);
// Reshape to loaded data.
top[i]->ReshapeLike(*blob);
// Copy the data
caffe_copy(blob->count(), blob->gpu_data(), top[i]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| 7ba42325846665047f8cd9e1902d2591709a5f13.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
for (int i = 0; i < top.size(); ++i) {
const Blob<Dtype>* blob = batch->blob(i);
// Reshape to loaded data.
top[i]->ReshapeLike(*blob);
// Copy the data
caffe_copy(blob->count(), blob->gpu_data(), top[i]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
446032d0a1885be4db6c4b0e3f2a02292924d5dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices,
const int slice_size, const int bottom_slice_axis,
const int top_slice_axis, const int offset_slice_axis,
Dtype* out_data) {
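// Decompose the flat index into (slice_num, slice_index) within this top blob,
// then map it to the corresponding offset in the bottom blob along slice_axis.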
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index
+ (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_slice = program.get_kernel(
CL_KERNEL_SELECT("slice"));
viennacl::ocl::enqueue(
oclk_slice(nthreads, WrapHandle((cl_mem) bottom_data, &ctx),
kForward ? 1 : 0, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_slice_axis += top_slice_axis;
}
}
template<typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_slice = program.get_kernel(
CL_KERNEL_SELECT("slice"));
viennacl::ocl::enqueue(
oclk_slice(nthreads, WrapHandle((cl_mem) top_diff, &ctx),
kForward ? 1 : 0, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
| 446032d0a1885be4db6c4b0e3f2a02292924d5dd.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices,
const int slice_size, const int bottom_slice_axis,
const int top_slice_axis, const int offset_slice_axis,
Dtype* out_data) {
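// Decompose the flat index into (slice_num, slice_index) within this top blob,
// then map it to the corresponding offset in the bottom blob along slice_axis.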
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index
+ (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_slice = program.get_kernel(
CL_KERNEL_SELECT("slice"));
viennacl::ocl::enqueue(
oclk_slice(nthreads, WrapHandle((cl_mem) bottom_data, &ctx),
kForward ? 1 : 0, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_slice_axis += top_slice_axis;
}
}
template<typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_slice = program.get_kernel(
CL_KERNEL_SELECT("slice"));
viennacl::ocl::enqueue(
oclk_slice(nthreads, WrapHandle((cl_mem) top_diff, &ctx),
kForward ? 1 : 0, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
4c1754ec85a59fc340a58526f0cc7e270d7077bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>
#include <tuple>
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include "parallel_compose.h"
#include "prefix_scan.h"
namespace gtn {
namespace detail {
namespace dataparallel {
namespace {
struct GraphDataParallelGPU {
size_t numNodes;
size_t numArcs;
// True if a node is accept or start, false otherwise
int* accept;
int* start;
// One value per node - i-th value corresponds to i-th node
// Last element is the total number of arcs, so that
// each element and its neighbor forms a range
int* inArcOffset;
int* outArcOffset;
// One value per arc
int* inArcs;
int* outArcs;
// One value per arc
// i-th value corresponds to i-th arc
int* ilabels;
int* olabels;
int* srcNodes;
int* dstNodes;
float* weights;
};
struct nodeAndArcPairGPU {
int2 nodePair;
int2 arcPair;
int2 checkEpsilonArcPair;
bool checkArcPair;
bool isValid;
};
inline int div_up(int x, int y) {
return (x + y - 1) / y;
}
__device__ __host__
inline int TwoDToOneDIndex(int n1, int n2, int n1Extent) {
assert(n1 < n1Extent);
return n1 + n2 * n1Extent;
}
__device__
inline int2 OneDToTwoDIndexGPU(int n, int n1Extent) {
assert(n1Extent > 0);
const int n2 = n / n1Extent;
const int n1 = n % n1Extent;
return make_int2(n1, n2);
}
bool checkAnyTrueGPU(const int* flags, int numFlags) {
thrust::device_ptr<const int> tPtr(flags);
const int sum = thrust::reduce(tPtr, tPtr + numFlags, int(0));
return (sum > 0);
}
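// Computes an exclusive prefix sum of `input` into a newly allocated device buffer
// and also returns the total sum; when appendSum is true the total is additionally
// written as the last element of the returned buffer.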
std::tuple<int*, size_t, int> prefixSumScanGPU(const int* input, size_t numElts, bool appendSum) {
const size_t scanNumElts = appendSum ? numElts + 1 : numElts;
int *output;
hipMalloc((void **)(&(output)), sizeof(int) * scanNumElts);
hipMemcpy((void *)(output), (void *)(input), sizeof(int) * numElts, hipMemcpyDeviceToDevice);
int sum = 0;
if (numElts > 0) {
thrust::device_ptr<int> tPtr(output);
thrust::exclusive_scan(tPtr, tPtr + numElts, tPtr);
int lastElementInput;
int lastElementOutput;
hipMemcpy((void *)(&lastElementInput), (void *)(&(input[numElts-1])), sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy((void *)(&lastElementOutput), (void *)(&(output[numElts-1])), sizeof(int), hipMemcpyDeviceToHost);
sum = lastElementInput + lastElementOutput;
}
if (appendSum) {
assert(scanNumElts > 0);
hipMemcpy((void *)(&(output[scanNumElts-1])), (void *)(&sum), sizeof(int), hipMemcpyHostToDevice);
}
return std::make_tuple(output, scanNumElts, sum);
}
// Map the thread id to its corresponding node pair and arc pair.
// Also map the thread id to the two flags checkEpsilonArcPair.first and
// checkEpsilonArcPair.second. When checkEpsilonArcPair.first is set, the
// corresponding tid will check for arcs with epsilon labels in the node from
// the first graph; the same logic applies to checkEpsilonArcPair.second.
// Search to find which node-pair range this tid falls into: a linear search
// for now (arcCrossProductOffset is sorted by definition).
__device__
nodeAndArcPairGPU computeNodeAndArcPair(
int tid,
size_t numArcCrossProductOffset,
const int* arcCrossProductOffset,
const int* toExploreNumArcsFirst,
const int* toExploreNumArcsSecond,
const int* toExploreNodePairFirst,
const int* toExploreNodePairSecond) {
nodeAndArcPairGPU result;
result.checkArcPair = false;
result.checkEpsilonArcPair = make_int2(false, false);
result.isValid = false;
// There should be at least two values to form a range
assert(numArcCrossProductOffset >= 2);
for (size_t i = 0; i < numArcCrossProductOffset - 1; ++i) {
const int lVal = arcCrossProductOffset[i];
const int rVal = arcCrossProductOffset[i + 1];
if ((lVal <= tid) && (tid < rVal)) {
result.isValid = true;
result.nodePair = make_int2(
toExploreNodePairFirst[i], toExploreNodePairSecond[i]);
// The range of idx is from
// [0, toExploreNumArcsFirst[i] * toExploreNumArcsSecond[i])
const int idx = tid - lVal;
const int numArcs = rVal - lVal;
assert(idx >= 0);
assert(idx < numArcs);
assert(numArcs > 0);
const int arcProd =
toExploreNumArcsFirst[i] * toExploreNumArcsSecond[i];
if (numArcs == arcProd) {
result.checkArcPair = true;
// We map the tids to 2D grid where the
// x-axis is toExploreNumArcsFirst[i] (row)
// y-axis is toExploreNumArcsSecond[i] (column)
assert(toExploreNumArcsFirst[i] > 0);
result.arcPair = make_int2(
idx % toExploreNumArcsFirst[i],
idx / toExploreNumArcsFirst[i]);
// Pick the tids from the first row since we need only one
// tid per arc of the node from the first graph to check for
// epsilon
if (idx < toExploreNumArcsFirst[i]) {
result.checkEpsilonArcPair.x = true;
}
// Pick the tids from the first column since we need only one
// tid per arc of the node from the first graph to check for
// epsilon
if ((idx % toExploreNumArcsFirst[i]) == 0) {
result.checkEpsilonArcPair.y = true;
}
} else if ((arcProd == 0) && (numArcs == toExploreNumArcsFirst[i])) {
// TODO: Likely not the brightest idea to use -1 as sentinel
result.arcPair = make_int2(idx, -1);
result.checkEpsilonArcPair.x = true;
} else if ((arcProd == 0) && (numArcs == toExploreNumArcsSecond[i])) {
// TODO: Likely not the brightest idea to use -1 as sentinel
result.arcPair = make_int2(-1, idx);
result.checkEpsilonArcPair.y = true;
}
break;
}
}
return result;
}
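// One thread per to-explore node pair: computes the number of in (or out,
// per inOrOutArc) arcs of each member node in its own graph and writes the
// size of their arc cross product; when one side has zero arcs the other
// side's count is kept so that epsilon arcs can still be checked.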
__global__
void calculateArcCrossProductOffsetKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
int* toExploreNumArcsFirstGPU,
int* toExploreNumArcsSecondGPU,
int* arcCrossProductOffsetGPU,
size_t numToExploreNodePair,
bool inOrOutArc) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numToExploreNodePair) {
int node = toExploreNodePairFirstGPU[gTid];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph1 = ((node + 1) == graphDP1GPU.numNodes)
? graphDP1GPU.numArcs
: graphDP1GPU.inArcOffset[node + 1];
const int outArcOffsetGraph1 = ((node + 1) == graphDP1GPU.numNodes)
? graphDP1GPU.numArcs
: graphDP1GPU.outArcOffset[node + 1];
const int numArcsFirst = inOrOutArc
? inArcOffsetGraph1 - graphDP1GPU.inArcOffset[node]
: outArcOffsetGraph1 - graphDP1GPU.outArcOffset[node];
node = toExploreNodePairSecondGPU[gTid];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph2 = ((node + 1) == graphDP2GPU.numNodes)
? graphDP2GPU.numArcs
: graphDP2GPU.inArcOffset[node + 1];
const int outArcOffsetGraph2 = ((node + 1) == graphDP2GPU.numNodes)
? graphDP2GPU.numArcs
: graphDP2GPU.outArcOffset[node + 1];
const int numArcsSecond = inOrOutArc
? inArcOffsetGraph2 - graphDP2GPU.inArcOffset[node]
: outArcOffsetGraph2 - graphDP2GPU.outArcOffset[node];
toExploreNumArcsFirstGPU[gTid] = numArcsFirst;
toExploreNumArcsSecondGPU[gTid] = numArcsSecond;
// Even when numArcsFirst or numArcsSecond is 0 we have to consider
// the case when the other graph has arcs with epsilon label
if (numArcsFirst != 0 && numArcsSecond != 0) {
arcCrossProductOffsetGPU[gTid] = numArcsFirst * numArcsSecond;
} else if (numArcsFirst != 0 && numArcsSecond == 0) {
arcCrossProductOffsetGPU[gTid] = numArcsFirst;
} else if (numArcsFirst == 0 && numArcsSecond != 0) {
arcCrossProductOffsetGPU[gTid] = numArcsSecond;
} else {
arcCrossProductOffsetGPU[gTid] = 0;
}
}
}
// Takes a pair of nodes, where each member of the pair comes from a different
// graph, and calculates a vector of the number of arcs in the cross product
// of the arcs incident on each pair (incoming or outgoing per inOrOutArc).
// Implemented as a kernel launch (calculateArcCrossProductOffsetKernel).
std::tuple<int*, int*, int*>
calculateArcCrossProductOffsetGPU(
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
size_t numToExploreNodePair,
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
bool inOrOutArc) {
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
int* arcCrossProductOffsetGPU;
hipMalloc((void **)(&(toExploreNumArcsFirstGPU)), sizeof(int) * numToExploreNodePair);
hipMalloc((void **)(&(toExploreNumArcsSecondGPU)), sizeof(int) * numToExploreNodePair);
hipMalloc((void **)(&(arcCrossProductOffsetGPU)), sizeof(int) * numToExploreNodePair);
const int NT = 128;
const int gridSize = div_up(numToExploreNodePair, NT);
hipLaunchKernelGGL(( calculateArcCrossProductOffsetKernel), dim3(gridSize), dim3(NT), 0, 0,
graphDP1GPU, graphDP2GPU, toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU, arcCrossProductOffsetGPU,
numToExploreNodePair, inOrOutArc);
return std::make_tuple(arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU);
}
// This function needs to be thread safe since multiple threads
// can call it and they will overlap on curIdx and dstIdx
__device__
void calculateNumArcsAndNodesToExplore(
int curIdx,
int dstIdx,
const int* reachable,
int* newNodes,
int* toExplore,
int* numOutArcs,
int* numInArcs) {
if (reachable[dstIdx]) {
// Atomic test and set for newNodes
/*
int oldVal = newNodes[dstIdx];
if (!newNodes[dstIdx]) {
newNodes[dstIdx] = true;
}*/
int oldVal = atomicCAS(&(newNodes[dstIdx]), false, true);
if (!oldVal) {
toExplore[dstIdx] = true;
}
// These are atomic increments
// numOutArcs[curIdx]++;
// numInArcs[dstIdx]++;
atomicAdd(&(numOutArcs[curIdx]), 1);
atomicAdd(&(numInArcs[dstIdx]), 1);
// printf("cidx %d didx %d\n", curIdx, dstIdx);
// printf("no %d ni %d\n", numOutArcs[curIdx], numInArcs[dstIdx]);
}
}
// This function needs to be thread safe since multiple threads
// can call it
__device__
void generateCombinedGraphNodesAndArcs(
int dstIdx,
int curIdx,
const int2& arcPair,
const int2& dstNodeStartAndAccept,
const int* reachable,
const int* newNodesOffset,
int* newNodesVisited,
int* toExplore,
int* gradInfoFirst,
int* gradInfoSecond,
GraphDataParallelGPU& newGraphDP,
int ilabel,
int olabel,
float weight) {
if (reachable[dstIdx]) {
// Atomic test and set for newNodesVisited
/*
int oldVal = newNodesVisited[dstIdx];
if (!newNodesVisited[dstIdx]) {
newNodesVisited[dstIdx] = true;
}*/
int oldVal = atomicCAS(&(newNodesVisited[dstIdx]), false, true);
if (!oldVal) {
toExplore[dstIdx] = true;
}
// Set accept and start nodes
// I think I only need it for dst nodes and src nodes
// Note: Multiple threads can have the same dstIdx and write to the same
// location and collide. This _should_ be fine since they are going
// to write the same value
newGraphDP.start[newNodesOffset[dstIdx]] = dstNodeStartAndAccept.x;
newGraphDP.accept[newNodesOffset[dstIdx]] = dstNodeStartAndAccept.y;
// Both of these increments are atomic
// int inArcIdx = newGraphDP.inArcOffset[newNodesOffset[dstIdx]]++;
// int outArcIdx = newGraphDP.outArcOffset[newNodesOffset[curIdx]]++;
int inArcIdx = atomicAdd(&(newGraphDP.inArcOffset[newNodesOffset[dstIdx]]), 1);
int outArcIdx = atomicAdd(&(newGraphDP.outArcOffset[newNodesOffset[curIdx]]), 1);
// printf("dstIdx %d curIdx %d\n", dstIdx, curIdx);
// printf("inArcIdx %d outArcIdx %d\n", inArcIdx, outArcIdx);
// outArcIdx is also the arc identifier
newGraphDP.outArcs[outArcIdx] = outArcIdx;
newGraphDP.inArcs[inArcIdx] = outArcIdx;
// Fill in everything else for this arc
newGraphDP.ilabels[outArcIdx] = ilabel;
newGraphDP.olabels[outArcIdx] = olabel;
newGraphDP.srcNodes[outArcIdx] = newNodesOffset[curIdx];
newGraphDP.dstNodes[outArcIdx] = newNodesOffset[dstIdx];
newGraphDP.weights[outArcIdx] = weight;
// printf("ilabels %d olabels %d srcNodes %d dstNodes %d weights %f\n",
// newGraphDP.ilabels[outArcIdx], newGraphDP.olabels[outArcIdx],
// newGraphDP.srcNodes[outArcIdx], newGraphDP.dstNodes[outArcIdx],
// newGraphDP.weights[outArcIdx]);
gradInfoFirst[outArcIdx] = arcPair.x;
gradInfoSecond[outArcIdx] = arcPair.y;
}
}
__global__
void convertToNodePairKernel(
const int* flagsGPU,
const int* indicesGPU,
int* toExploreNodePairFirstGPU,
int* toExploreNodePairSecondGPU,
int extent,
size_t numFlags,
size_t numValidNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numFlags) {
if (flagsGPU[gTid]) {
const int index = indicesGPU[gTid];
assert(index >= 0);
assert(index < numValidNodes);
int2 node = OneDToTwoDIndexGPU(gTid, extent);
toExploreNodePairFirstGPU[index] = node.x;
toExploreNodePairSecondGPU[index] = node.y;
}
}
}
// Convert bool array to node pairs for the true flags
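// Example: with extent = 2 and flags = [0, 1, 0, 1], the exclusive scan
// gives indices [0, 0, 1, 1] and two valid nodes, producing the node pairs
// (1, 0) and (1, 1).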
std::tuple<int*, int*, size_t> convertToNodePairGPU(
const int* flagsGPU,
size_t numFlags,
int extent) {
int* indicesGPU;
size_t numIndices;
size_t numValidNodes;
std::tie(indicesGPU, numIndices, numValidNodes) = prefixSumScanGPU(flagsGPU, numFlags, false);
assert(numFlags == numIndices);
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
hipMalloc((void **)(&(toExploreNodePairFirstGPU)), sizeof(int) * numValidNodes);
hipMalloc((void **)(&(toExploreNodePairSecondGPU)), sizeof(int) * numValidNodes);
const int NT = 128;
const int gridSize = div_up(numFlags, NT);
hipLaunchKernelGGL(( convertToNodePairKernel), dim3(gridSize), dim3(NT), 0, 0, flagsGPU, indicesGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
extent, numFlags, numValidNodes);
hipFree(indicesGPU);
return std::make_tuple(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numValidNodes);
}
__device__
int2 getStartAndAccept(
const GraphDataParallelGPU& graphDP1,
const GraphDataParallelGPU& graphDP2,
const int2& dstNodePair) {
int2 dstNodeStartAndAccept = make_int2(
graphDP1.start[dstNodePair.x] && graphDP2.start[dstNodePair.y],
graphDP1.accept[dstNodePair.x] &&
graphDP2.accept[dstNodePair.y]);
return dstNodeStartAndAccept;
}
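// Deep-copies the SOA host graph into freshly allocated device buffers. The
// caller is responsible for freeing the returned device pointers.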
GraphDataParallelGPU copyToGPU(const GraphDataParallel& graphDP) {
GraphDataParallelGPU graphDPGPU;
graphDPGPU.numNodes = graphDP.inArcOffset.size();
graphDPGPU.numArcs = graphDP.inArcs.size();
assert(graphDP.accept.size() == graphDPGPU.numNodes);
assert(graphDP.start.size() == graphDPGPU.numNodes);
assert(graphDP.inArcOffset.size() == graphDPGPU.numNodes);
assert(graphDP.outArcOffset.size() == graphDPGPU.numNodes);
assert(graphDP.inArcs.size() == graphDPGPU.numArcs);
assert(graphDP.outArcs.size() == graphDPGPU.numArcs);
assert(graphDP.ilabels.size() == graphDPGPU.numArcs);
assert(graphDP.olabels.size() == graphDPGPU.numArcs);
assert(graphDP.srcNodes.size() == graphDPGPU.numArcs);
assert(graphDP.dstNodes.size() == graphDPGPU.numArcs);
assert(graphDP.weights.size() == graphDPGPU.numArcs);
// Allocate memory
hipMalloc((void **)(&(graphDPGPU.accept)), sizeof(int) * graphDPGPU.numNodes);
hipMalloc((void **)(&(graphDPGPU.start)), sizeof(int) * graphDPGPU.numNodes);
hipMalloc((void **)(&(graphDPGPU.inArcOffset)), sizeof(int) * graphDPGPU.numNodes);
hipMalloc((void **)(&(graphDPGPU.outArcOffset)), sizeof(int) * graphDPGPU.numNodes);
hipMalloc((void **)(&(graphDPGPU.inArcs)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.outArcs)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.ilabels)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.olabels)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.srcNodes)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.dstNodes)), sizeof(int) * graphDPGPU.numArcs);
hipMalloc((void **)(&(graphDPGPU.weights)), sizeof(float) * graphDPGPU.numArcs);
// Copy
hipMemcpy((void *)(graphDPGPU.accept), (void *)(graphDP.accept.data()), sizeof(int) * graphDPGPU.numNodes, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.start), (void *)(graphDP.start.data()), sizeof(int) * graphDPGPU.numNodes, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.inArcOffset), (void *)(graphDP.inArcOffset.data()), sizeof(int) * graphDPGPU.numNodes, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.outArcOffset), (void *)(graphDP.outArcOffset.data()), sizeof(int) * graphDPGPU.numNodes, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.inArcs), (void *)(graphDP.inArcs.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.outArcs), (void *)(graphDP.outArcs.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.ilabels), (void *)(graphDP.ilabels.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.olabels), (void *)(graphDP.olabels.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.srcNodes), (void *)(graphDP.srcNodes.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.dstNodes), (void *)(graphDP.dstNodes.data()), sizeof(int) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
hipMemcpy((void *)(graphDPGPU.weights), (void *)(graphDP.weights.data()), sizeof(float) * graphDPGPU.numArcs, hipMemcpyHostToDevice);
return graphDPGPU;
}
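// Step-1 kernel: one thread per arc pair in the current frontier. It walks
// incoming arc pairs of each node pair; when the first graph's output label
// matches the second graph's input label (or one side has an epsilon arc),
// the corresponding predecessor pair is marked reachable and queued for the
// next frontier. Pairs that matched on epsilon are recorded in
// epsilonMatchedGPU.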
__global__
void findReachableKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
int* toExploreGPU,
int* reachableGPU,
int* epsilonMatchedGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
// printf("tid = %d, valid = %d\n", gTid, result.isValid);
// Does this node pair match?
if (result.isValid) {
int inArcOffset = graphDP1GPU.inArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.inArcs[inArcOffset + result.arcPair.x];
inArcOffset = graphDP2GPU.inArcOffset[result.nodePair.y];
const int secondArcIdx = graphDP2GPU.inArcs[inArcOffset + result.arcPair.y];
// printf("tid = %d, cp = %d\n", gTid, result.checkArcPair);
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
const int idx = TwoDToOneDIndex(
graphDP1GPU.srcNodes[firstArcIdx],
graphDP2GPU.srcNodes[secondArcIdx],
numNodesFirst);
// printf("tid = %d, idx = %d\n", gTid, idx);
if (graphDP1GPU.olabels[firstArcIdx] == epsilon) {
epsilonMatchedGPU[idx] = true;
}
// idx may not be unique amongst all threads.
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
// printf("r %d t %d \n", reachableGPU[idx], toExploreGPU[idx]);
}
// Only valid for arcs incoming to node from first graph
if (result.checkEpsilonArcPair.x &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
const int idx = TwoDToOneDIndex(
graphDP1GPU.srcNodes[firstArcIdx], result.nodePair.y, numNodesFirst);
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
}
// Only valid for arcs incoming to node from second graph
if (result.checkEpsilonArcPair.y &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
const int idx = TwoDToOneDIndex(
result.nodePair.x, graphDP2GPU.srcNodes[secondArcIdx], numNodesFirst);
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
}
}
}
}
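// Step-2 kernel: one thread per arc pair in the frontier. For every label or
// epsilon match whose destination pair is reachable, it marks the destination
// as a new node, queues it for the next frontier, and bumps the out-arc count
// of the current pair and the in-arc count of the destination pair.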
__global__
void computeValidNodeAndArcKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
const int* reachableGPU,
const int* epsilonMatchedGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
int* toExploreGPU,
int* newNodesGPU,
int* numInArcsGPU,
int* numOutArcsGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
// Map tid to corresponding node and arc pair
// Search to find which node pair this tid will fall into
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
if (result.isValid) {
int outArcOffset = graphDP1GPU.outArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.outArcs[outArcOffset + result.arcPair.x];
outArcOffset = graphDP2GPU.outArcOffset[result.nodePair.y];
const int secondArcIdx =
graphDP2GPU.outArcs[outArcOffset + result.arcPair.y];
const bool epsilonMatch = epsilonMatchedGPU[TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst)];
// Does this node pair match?
// Skip epsilon matches
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
const int dstIdx = TwoDToOneDIndex(
graphDP1GPU.dstNodes[firstArcIdx],
graphDP2GPU.dstNodes[secondArcIdx],
numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1a dst %d cur %d\n", dstIdx, curIdx);
// We track if any two arcs outgoing from this node pair match
// on epsilon. We record if they do.
if (graphDP1GPU.olabels[firstArcIdx] != epsilon) {
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
}
if (result.checkEpsilonArcPair.x &&
(!epsilonMatch || graphDP2GPU.accept[result.nodePair.y] ||
!graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
const int dstIdx = TwoDToOneDIndex(
graphDP1GPU.dstNodes[firstArcIdx], result.nodePair.y, numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1b dst %d cur %d\n", dstIdx, curIdx);
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
if (result.checkEpsilonArcPair.y &&
(!epsilonMatch || graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
const int dstIdx = TwoDToOneDIndex(
result.nodePair.x, graphDP2GPU.dstNodes[secondArcIdx], numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1c dst %d cur %d\n", dstIdx, curIdx);
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
}
}
}
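// Step-4 kernel: repeats the step-2 traversal, but now materializes the arcs
// of the combined graph using the offsets from step 3, sets start/accept
// flags of destination nodes, and records the (first, second) parent arc
// indices needed for the gradient computation.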
__global__
void generateNodeAndArcKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
const int* reachableGPU,
const int* epsilonMatchedGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
GraphDataParallelGPU newGraphDPGPU,
int* toExploreGPU,
int* gradInfoFirstGPU,
int* gradInfoSecondGPU,
int* newNodesOffsetGPU,
int* newNodesVisitedGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
// Map tid to corresponding node and arc pair
// Search to find which node pair this tid will fall into
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
if (result.isValid) {
int outArcOffset = graphDP1GPU.outArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.outArcs[outArcOffset + result.arcPair.x];
outArcOffset = graphDP2GPU.outArcOffset[result.nodePair.y];
const int secondArcIdx =
graphDP2GPU.outArcs[outArcOffset + result.arcPair.y];
const bool epsilonMatch = epsilonMatchedGPU[TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst)];
// Does this node pair match?
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
int2 dstNodePair = make_int2(
graphDP1GPU.dstNodes[firstArcIdx], graphDP2GPU.dstNodes[secondArcIdx]);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2a dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
// We track if any two arcs outgoing from this node pair match
// on epsilon. We record if they do.
if (graphDP1GPU.olabels[firstArcIdx] != epsilon) {
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(firstArcIdx, secondArcIdx),
dstNodeStartAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
graphDP1GPU.ilabels[firstArcIdx],
graphDP2GPU.olabels[secondArcIdx],
graphDP1GPU.weights[firstArcIdx] + graphDP2GPU.weights[secondArcIdx]);
}
}
// The epsilon matches
if (result.checkEpsilonArcPair.x &&
(!epsilonMatch || graphDP2GPU.accept[result.nodePair.y] ||
!graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
// When arc from first node has epsilon label then we consider
// second node
int2 dstNodePair = make_int2(
graphDP1GPU.dstNodes[firstArcIdx], result.nodePair.y);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2b dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(firstArcIdx, -1),
dstNodeStartAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
graphDP1GPU.ilabels[firstArcIdx],
epsilon,
graphDP1GPU.weights[firstArcIdx]);
}
// The epsilon matches
if (result.checkEpsilonArcPair.y &&
(!epsilonMatch || graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
// When arc from second node has epsilon label then we consider
// first node
int2 dstNodePair = make_int2(
result.nodePair.x, graphDP2GPU.dstNodes[secondArcIdx]);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2c dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAndAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(-1, secondArcIdx),
dstNodeStartAndAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
epsilon,
graphDP2GPU.olabels[secondArcIdx],
graphDP2GPU.weights[secondArcIdx]);
}
}
}
}
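// Step 3 helper: scatters the per-pair in/out arc counts into the compacted
// per-node arrays of the combined graph; a subsequent prefix sum converts
// these counts into arc offsets.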
__global__
void calculateNumArcsKernel(
const int* flagsGPU,
const int* indicesGPU,
const int* inputInArcsGPU,
const int* inputOutArcsGPU,
int* outputInArcsGPU,
int* outputOutArcsGPU,
size_t numFlags,
size_t numValidNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numFlags) {
if (flagsGPU[gTid]) {
const int index = indicesGPU[gTid];
assert(index >= 0);
assert(index < numValidNodes);
outputInArcsGPU[index] = inputInArcsGPU[gTid];
outputOutArcsGPU[index] = inputOutArcsGPU[gTid];
}
}
}
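// Seeds step 4: every reachable pair of start nodes becomes a start node of
// the combined graph (accepting if both members accept) and forms the
// initial frontier.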
__global__
void fourthPassInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* reachableGPU,
const int* newNodesOffsetGPU,
GraphDataParallelGPU newGraphDPGPU,
int* toExploreGPU,
int* newNodesVisitedGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.start[indices.x] && graphDP2GPU.start[indices.y]) {
if (reachableGPU[gTid]) {
toExploreGPU[gTid] = true;
newNodesVisitedGPU[gTid] = true;
newGraphDPGPU.start[newNodesOffsetGPU[gTid]] = true;
newGraphDPGPU.accept[newNodesOffsetGPU[gTid]] =
graphDP1GPU.accept[indices.x] && graphDP2GPU.accept[indices.y];
}
}
}
}
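// Seeds step 2: every reachable pair of start nodes is marked as a new node
// and forms the initial frontier.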
__global__
void secondPassInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* reachableGPU,
int* toExploreGPU,
int* newNodesGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.start[indices.x] && graphDP2GPU.start[indices.y]) {
if (reachableGPU[gTid]) {
toExploreGPU[gTid] = true;
newNodesGPU[gTid] = true;
}
}
}
}
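// Seeds step 1: every pair of accepting nodes is reachable by definition and
// forms the initial frontier for the backward sweep.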
__global__
void findReachableInitInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
int* reachableGPU,
int* toExploreGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.accept[indices.x] && graphDP2GPU.accept[indices.y]) {
toExploreGPU[gTid] = true;
reachableGPU[gTid] = true;
}
}
}
} // namespace
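// Composes two graphs on the GPU. The state space is the cross product of
// node pairs, explored in data-parallel passes: (1) a backward sweep from
// accepting pairs marks reachable pairs, (2) a forward sweep from start pairs
// discovers the valid nodes of the combined graph and counts their arcs,
// (3) prefix sums turn those counts into node and arc offsets, and (4) a
// final forward sweep writes the nodes, arcs and gradient bookkeeping.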
Graph compose(const Graph& first, const Graph& second) {
GraphDataParallel graphDP1, graphDP2;
// Convert from AOS to SOA
graphDP1 = convertToDataParallel(first);
graphDP2 = convertToDataParallel(second);
// Copy to GPU
GraphDataParallelGPU graphDP1GPU, graphDP2GPU;
graphDP1GPU = copyToGPU(graphDP1);
graphDP2GPU = copyToGPU(graphDP2);
const int numAllPairNodes = first.numNodes() * second.numNodes();
const int numNodesFirst = first.numNodes();
// Fixed number of CUDA threads and stream for all kernels
const int NT = 128;
//////////////////////////////////////////////////////////////////////////
// Step 1: Data parallel findReachable
//////////////////////////////////////////////////////////////////////////
int* reachableGPU;
int* epsilonMatchedGPU;
int* toExploreGPU;
hipMalloc((void **)(&reachableGPU), sizeof(int) * numAllPairNodes);
hipMalloc((void **)(&epsilonMatchedGPU), sizeof(int) * numAllPairNodes);
hipMalloc((void **)(&toExploreGPU), sizeof(int) * numAllPairNodes);
hipMemset((void*)reachableGPU, false, sizeof(int) * numAllPairNodes);
hipMemset((void*)epsilonMatchedGPU, false, sizeof(int) * numAllPairNodes);
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
hipLaunchKernelGGL(( findReachableInitInitKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU,
reachableGPU, toExploreGPU, numNodesFirst, numAllPairNodes);
}
// std::cout << "num all pair nodes " << numAllPairNodes << std::endl;
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, true);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
hipFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
hipLaunchKernelGGL(( findReachableKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU, toExploreNodePairFirstGPU,
toExploreNodePairSecondGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
toExploreGPU, reachableGPU, epsilonMatchedGPU);
}
hipFree(toExploreNodePairFirstGPU);
hipFree(toExploreNodePairSecondGPU);
hipFree(arcCrossProductOffsetGPU);
hipFree(toExploreNumArcsFirstGPU);
hipFree(toExploreNumArcsSecondGPU);
} // end while for findReachable
//////////////////////////////////////////////////////////////////////////
// Step 2: Compute a) valid nodes in combined graph
// b) Number of in and out arcs in combined graph
// This information is used to generate offsets for nodes and arcs
// in the combined graph
//////////////////////////////////////////////////////////////////////////
int* newNodesGPU;
int* numOutArcsGPU;
int* numInArcsGPU;
hipMalloc((void **)(&newNodesGPU), sizeof(int) * numAllPairNodes);
hipMalloc((void **)(&numOutArcsGPU), sizeof(int) * numAllPairNodes);
hipMalloc((void **)(&numInArcsGPU), sizeof(int) * numAllPairNodes);
hipMemset((void*)newNodesGPU, false, sizeof(int) * numAllPairNodes);
hipMemset((void*)numOutArcsGPU, 0, sizeof(int) * numAllPairNodes);
hipMemset((void*)numInArcsGPU, 0, sizeof(int) * numAllPairNodes);
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
hipLaunchKernelGGL(( secondPassInitKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU, reachableGPU,
toExploreGPU, newNodesGPU, numNodesFirst, numAllPairNodes);
}
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, false);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
hipFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
hipLaunchKernelGGL(( computeValidNodeAndArcKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU,
arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, reachableGPU,
epsilonMatchedGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
toExploreGPU, newNodesGPU, numInArcsGPU, numOutArcsGPU);
}
hipFree(toExploreNodePairFirstGPU);
hipFree(toExploreNodePairSecondGPU);
hipFree(arcCrossProductOffsetGPU);
hipFree(toExploreNumArcsFirstGPU);
hipFree(toExploreNumArcsSecondGPU);
}
//////////////////////////////////////////////////////////////////////////
// Step 3: Generate offsets for nodes and arcs in combined graph
//////////////////////////////////////////////////////////////////////////
GraphDataParallelGPU newGraphDPGPU;
int totalNodes;
int* newNodesOffsetGPU;
size_t numElements;
std::tie(newNodesOffsetGPU, numElements, totalNodes) = prefixSumScanGPU(newNodesGPU, numAllPairNodes, false);
assert(numElements == numAllPairNodes);
newGraphDPGPU.numNodes = totalNodes;
hipMalloc((void **)(&(newGraphDPGPU.start)), sizeof(int) * totalNodes);
hipMalloc((void **)(&(newGraphDPGPU.accept)), sizeof(int) * totalNodes);
hipMalloc((void **)(&(newGraphDPGPU.inArcOffset)), sizeof(int) * totalNodes);
hipMalloc((void **)(&(newGraphDPGPU.outArcOffset)), sizeof(int) * totalNodes);
// Generate offsets for nodes and arcs
{
const int NT = 128;
const int gridSize = div_up(numAllPairNodes, NT);
hipLaunchKernelGGL(( calculateNumArcsKernel), dim3(gridSize), dim3(NT), 0, 0, newNodesGPU, newNodesOffsetGPU,
numInArcsGPU, numOutArcsGPU, newGraphDPGPU.inArcOffset, newGraphDPGPU.outArcOffset,
numAllPairNodes, totalNodes);
}
int totalInArcs;
int totalOutArcs;
int* inArcOffsetGPU;
int* outArcOffsetGPU;
std::tie(inArcOffsetGPU, numElements, totalInArcs) = prefixSumScanGPU(newGraphDPGPU.inArcOffset, totalNodes, false);
assert(numElements == totalNodes);
std::tie(outArcOffsetGPU, numElements, totalOutArcs) = prefixSumScanGPU(newGraphDPGPU.outArcOffset, totalNodes, false);
assert(numElements == totalNodes);
assert(totalInArcs == totalOutArcs);
newGraphDPGPU.numArcs = totalOutArcs;
hipMalloc((void **)(&(newGraphDPGPU.inArcs)), sizeof(int) * totalInArcs);
hipMalloc((void **)(&(newGraphDPGPU.outArcs)), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&(newGraphDPGPU.ilabels)), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&(newGraphDPGPU.olabels)), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&(newGraphDPGPU.srcNodes)), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&(newGraphDPGPU.dstNodes)), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&(newGraphDPGPU.weights)), sizeof(float) * totalOutArcs);
hipMemcpy((void *)(newGraphDPGPU.inArcOffset), (void *)(inArcOffsetGPU), sizeof(int) * totalNodes, hipMemcpyDeviceToDevice);
hipMemcpy((void *)(newGraphDPGPU.outArcOffset), (void *)(outArcOffsetGPU), sizeof(int) * totalNodes, hipMemcpyDeviceToDevice);
// std::cout << "totalInArcs " << totalInArcs << " totalOutArcs " << totalOutArcs << std::endl;
// SOA for gradInfo
std::pair<std::vector<int>, std::vector<int>> gradInfo;
gradInfo.first.resize(totalOutArcs);
gradInfo.second.resize(totalOutArcs);
int *gradInfoFirstGPU;
int *gradInfoSecondGPU;
hipMalloc((void **)(&gradInfoFirstGPU), sizeof(int) * totalOutArcs);
hipMalloc((void **)(&gradInfoSecondGPU), sizeof(int) * totalOutArcs);
//////////////////////////////////////////////////////////////////////////
// Step 4: Generate nodes and arcs in combined graph
//////////////////////////////////////////////////////////////////////////
int* newNodesVisitedGPU;
hipMalloc((void **)(&newNodesVisitedGPU), sizeof(int) * numAllPairNodes);
hipMemset((void*)newNodesVisitedGPU, false, sizeof(int) * numAllPairNodes);
// Reset to a pristine state for the next frontier to explore
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
hipMemset((void *)(newGraphDPGPU.start), false, sizeof(int) * totalNodes);
hipMemset((void *)(newGraphDPGPU.accept), false, sizeof(int) * totalNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
hipLaunchKernelGGL(( fourthPassInitKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU, reachableGPU,
newNodesOffsetGPU, newGraphDPGPU, toExploreGPU, newNodesVisitedGPU,
numNodesFirst, numAllPairNodes);
}
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, false);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
hipFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
hipMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
hipLaunchKernelGGL(( generateNodeAndArcKernel), dim3(gridSize), dim3(NT), 0, 0, graphDP1GPU, graphDP2GPU,
arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, reachableGPU,
epsilonMatchedGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
newGraphDPGPU, toExploreGPU, gradInfoFirstGPU, gradInfoSecondGPU,
newNodesOffsetGPU, newNodesVisitedGPU);
}
hipFree(toExploreNodePairFirstGPU);
hipFree(toExploreNodePairSecondGPU);
hipFree(arcCrossProductOffsetGPU);
hipFree(toExploreNumArcsFirstGPU);
hipFree(toExploreNumArcsSecondGPU);
}
// Reset incremented offsets to original value
hipMemcpy((void *)(newGraphDPGPU.inArcOffset), (void *)(inArcOffsetGPU), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToDevice);
hipMemcpy((void *)(newGraphDPGPU.outArcOffset), (void *)(outArcOffsetGPU), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToDevice);
// Copy graph on GPU to CPU
GraphDataParallel newGraphDP;
newGraphDP.start.resize(totalNodes);
newGraphDP.accept.resize(totalNodes);
newGraphDP.inArcOffset.resize(totalNodes);
newGraphDP.outArcOffset.resize(totalNodes);
newGraphDP.inArcs.resize(totalInArcs);
newGraphDP.outArcs.resize(totalOutArcs);
newGraphDP.ilabels.resize(totalOutArcs);
newGraphDP.olabels.resize(totalOutArcs);
newGraphDP.srcNodes.resize(totalOutArcs);
newGraphDP.dstNodes.resize(totalOutArcs);
newGraphDP.weights.resize(totalOutArcs);
hipMemcpy((void *)(newGraphDP.accept.data()), (void *)(newGraphDPGPU.accept), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.start.data()), (void *)(newGraphDPGPU.start), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.inArcOffset.data()), (void *)(newGraphDPGPU.inArcOffset), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.outArcOffset.data()), (void *)(newGraphDPGPU.outArcOffset), sizeof(int) * newGraphDPGPU.numNodes, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.inArcs.data()), (void *)(newGraphDPGPU.inArcs), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.outArcs.data()), (void *)(newGraphDPGPU.outArcs), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.ilabels.data()), (void *)(newGraphDPGPU.ilabels), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.olabels.data()), (void *)(newGraphDPGPU.olabels), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.srcNodes.data()), (void *)(newGraphDPGPU.srcNodes), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.dstNodes.data()), (void *)(newGraphDPGPU.dstNodes), sizeof(int) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(newGraphDP.weights.data()), (void *)(newGraphDPGPU.weights), sizeof(float) * newGraphDPGPU.numArcs, hipMemcpyDeviceToHost);
assert(newGraphDPGPU.numArcs == totalOutArcs);
hipMemcpy((void *)(gradInfo.first.data()), (void *)(gradInfoFirstGPU), sizeof(int) * totalOutArcs, hipMemcpyDeviceToHost);
hipMemcpy((void *)(gradInfo.second.data()), (void *)(gradInfoSecondGPU), sizeof(int) * totalOutArcs, hipMemcpyDeviceToHost);
hipFree(reachableGPU);
hipFree(epsilonMatchedGPU);
hipFree(toExploreGPU);
hipFree(newNodesGPU);
hipFree(numOutArcsGPU);
hipFree(numInArcsGPU);
hipFree(newNodesOffsetGPU);
hipFree(inArcOffsetGPU);
hipFree(outArcOffsetGPU);
hipFree(gradInfoFirstGPU);
hipFree(gradInfoSecondGPU);
hipFree(newNodesVisitedGPU);
hipFree(newGraphDPGPU.start);
hipFree(newGraphDPGPU.accept);
hipFree(newGraphDPGPU.inArcOffset);
hipFree(newGraphDPGPU.outArcOffset);
hipFree(newGraphDPGPU.inArcs);
hipFree(newGraphDPGPU.outArcs);
hipFree(newGraphDPGPU.ilabels);
hipFree(newGraphDPGPU.olabels);
hipFree(newGraphDPGPU.srcNodes);
hipFree(newGraphDPGPU.dstNodes);
hipFree(newGraphDPGPU.weights);
newGraphDPGPU.numNodes = 0;
newGraphDPGPU.numArcs = 0;
if (0)
{
std::cout << "nodes " << newGraphDP.inArcOffset.size() << std::endl;
std::cout << "nodes " << newGraphDP.outArcOffset.size() << std::endl;
std::cout << "start" << std::endl;
for (auto i : newGraphDP.start) {
std::cout << i << std::endl;
}
std::cout << "accept" << std::endl;
for (auto i : newGraphDP.accept) {
std::cout << i << std::endl;
}
std::cout << "inArcOffset" << std::endl;
for (auto i : newGraphDP.inArcOffset) {
std::cout << i << std::endl;
}
std::cout << "outArcOffset" << std::endl;
for (auto i : newGraphDP.outArcOffset) {
std::cout << i << std::endl;
}
std::cout << "inArcs" << std::endl;
for (auto i : newGraphDP.inArcs) {
std::cout << i << std::endl;
}
std::cout << "outArcs" << std::endl;
for (auto i : newGraphDP.outArcs) {
std::cout << i << std::endl;
}
std::cout << "ilabels" << std::endl;
for (auto i : newGraphDP.ilabels) {
std::cout << i << std::endl;
}
std::cout << "olabels" << std::endl;
for (auto i : newGraphDP.olabels) {
std::cout << i << std::endl;
}
std::cout << "srcNodes" << std::endl;
for (auto i : newGraphDP.srcNodes) {
std::cout << i << std::endl;
}
std::cout << "dstNodes" << std::endl;
for (auto i : newGraphDP.dstNodes) {
std::cout << i << std::endl;
}
std::cout << "weights" << std::endl;
for (auto i : newGraphDP.weights) {
std::cout << i << std::endl;
}
}
// Not needed since the CPU data is never incremented
// Shift offset values back down after adding arcs to newGraphDP
// The offset values got converted from exclusive prefix sum to inclusive
// Need to convert them back to exclusive prefix sum by starting with 0
// and shifting to right by 1
// for (int i = newGraphDP.outArcOffset.size() - 1; i >= 0; --i) {
// newGraphDP.outArcOffset[i] = i == 0 ? 0 : newGraphDP.outArcOffset[i - 1];
// newGraphDP.inArcOffset[i] = i == 0 ? 0 : newGraphDP.inArcOffset[i - 1];
// }
// Convert back and add in autograd metadata
auto nGraph = convertFromDataParallel(newGraphDP);
nGraph.setInputs({first, second});
if (0)
{
std::cout << "numNodes " << nGraph.numNodes() << std::endl;
std::cout << "accept" << std::endl;
for (auto i : nGraph.accept()) {
std::cout << i << std::endl;
}
std::cout << "start" << std::endl;
for (auto i : nGraph.start()) {
std::cout << i << std::endl;
}
std::cout << "numIn" << std::endl;
for (int i = 0; i < nGraph.numNodes(); ++i) {
std::cout << nGraph.numIn(i) << std::endl;
}
std::cout << "numOut" << std::endl;
for (int i = 0; i < nGraph.numNodes(); ++i) {
std::cout << nGraph.numOut(i) << std::endl;
}
}
// Convert gradInfo SOA to AOS
std::vector<std::pair<int, int>> gradInfoAOS;
for (int i = 0; i < gradInfo.first.size(); ++i) {
gradInfoAOS.emplace_back(gradInfo.first[i], gradInfo.second[i]);
}
// TODO eliminate this copy pasta.
auto gradFunc = [gradInfo = std::move(gradInfoAOS)](
std::vector<Graph>& inputs, Graph deltas) {
// In this case the arc's parents are always from the
// first and second input graphs respectively.
bool calcGrad1 = inputs[0].calcGrad();
bool calcGrad2 = inputs[1].calcGrad();
auto grad1 = calcGrad1 ? std::vector<float>(inputs[0].numArcs(), 0.0)
: std::vector<float>{};
auto grad2 = calcGrad2 ? std::vector<float>(inputs[1].numArcs(), 0.0)
: std::vector<float>{};
for (int i = 0; i < gradInfo.size(); i++) {
auto arcGrad = deltas.weight(i);
auto& arcs = gradInfo[i];
if (calcGrad1 && arcs.first >= 0) {
grad1[arcs.first] += arcGrad;
}
if (calcGrad2 && arcs.second >= 0) {
grad2[arcs.second] += arcGrad;
}
}
inputs[0].addGrad(std::move(grad1));
inputs[1].addGrad(std::move(grad2));
};
nGraph.setGradFunc(std::move(gradFunc));
return nGraph;
}
} // namespace dataparallel
} // namespace detail
} // namespace gtn
/*
if (0)
{
int *aCPGPU;
int *tEN1GPU;
int *tEN2GPU;
std::tie(aCPGPU, tEN1GPU, tEN2GPU) = calculateArcCrossProductOffsetGPU(
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
toExploreNodePair.first.size(), graphDP1GPU, graphDP2GPU, true);
std::vector<int> aCP(numToExploreNodePair);
std::vector<int> tEN1(numToExploreNodePair);
std::vector<int> tEN2(numToExploreNodePair);
hipMemcpy((void *)(aCP.data()), (void *)(aCPGPU), sizeof(int) * numToExploreNodePair, hipMemcpyDeviceToHost);
hipMemcpy((void *)(tEN1.data()), (void *)(tEN1GPU), sizeof(int) * numToExploreNodePair, hipMemcpyDeviceToHost);
hipMemcpy((void *)(tEN2.data()), (void *)(tEN2GPU), sizeof(int) * numToExploreNodePair, hipMemcpyDeviceToHost);
assert(std::equal(arcCrossProductOffset.begin(), arcCrossProductOffset.end(), aCP.begin()));
assert(std::equal(toExploreNumArcs.first.begin(), toExploreNumArcs.first.end(), tEN1.begin()));
assert(std::equal(toExploreNumArcs.second.begin(), toExploreNumArcs.second.end(), tEN2.begin()));
hipFree(aCPGPU);
hipFree(tEN1GPU);
hipFree(tEN2GPU);
}*/
/*
if(0)
{
std::vector<int> tVec(arcCrossProductOffset);
const size_t numElts = tVec.size();
int* tVecGPU;
hipMalloc((void **)(&tVecGPU), sizeof(int) * numElts);
hipMemcpy((void *)tVecGPU, (void *)(tVec.data()), sizeof(int) * numElts, hipMemcpyHostToDevice);
const int totalArcs = prefixSumScan(tVec, true);
int* tVecScanGPU;
size_t tVecScanElts;
int tArcsGPU;
std::tie(tVecScanGPU, tVecScanElts, tArcsGPU) = prefixSumScanGPU(tVecGPU, numElts, true);
assert(tVec.size() == (numElts + 1));
assert(tVecScanElts == (numElts + 1));
std::vector<int> tVecNew(tVec.size());
hipMemcpy((void *)(tVecNew.data()), (void *)(tVecScanGPU), sizeof(int) * tVecScanElts, hipMemcpyDeviceToHost);
assert(totalArcs == tArcsGPU);
assert(std::equal(tVec.begin(), tVec.end(), tVecNew.begin()));
hipFree(tVecGPU);
hipFree(tVecScanGPU);
}*/
/*
if (0)
{
int* tEN1GPU;
int* tEN2GPU;
size_t nTEN;
std::tie(tEN1GPU, tEN2GPU, nTEN) = convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
assert(nTEN == toExploreNodePair.first.size());
std::vector<int> tEN1(nTEN);
std::vector<int> tEN2(nTEN);
hipMemcpy((void *)(tEN1.data()), (void *)(tEN1GPU), sizeof(int) * nTEN, hipMemcpyDeviceToHost);
hipMemcpy((void *)(tEN2.data()), (void *)(tEN2GPU), sizeof(int) * nTEN, hipMemcpyDeviceToHost);
assert(std::equal(toExploreNodePair.first.begin(), toExploreNodePair.first.end(), tEN1.begin()));
assert(std::equal(toExploreNodePair.second.begin(), toExploreNodePair.second.end(), tEN2.begin()));
hipFree(tEN1GPU);
hipFree(tEN2GPU);
}*/
/*
inline std::pair<int, int> OneDToTwoDIndex(int n, int n1Extent) {
assert(n1Extent > 0);
const int n2 = n / n1Extent;
const int n1 = n % n1Extent;
return std::make_pair(n1, n2);
}
bool checkAnyTrue(const std::vector<int>& flags) {
// Potentially wasteful - but GPU friendly
return std::accumulate(flags.begin(), flags.end(), 0) > 0 ? true : false;
}*/
/*
// Convert int array to pairs for true flags
std::pair<std::vector<int>, std::vector<int>> convertToNodePair(
const std::vector<int>& flags,
int extent) {
std::vector<int> indices(flags);
const int numValidNodes = prefixSumScan(indices, false);
std::vector<int> toExploreNodePairFirst(numValidNodes);
std::vector<int> toExploreNodePairSecond(numValidNodes);
// No loop dependence
for (size_t i = 0; i < flags.size(); ++i) {
if (flags[i] == true) {
std::pair<int, int> node = OneDToTwoDIndex(i, extent);
const int index = indices[i];
assert(index >= 0);
assert(index < numValidNodes);
toExploreNodePairFirst[index] = node.first;
toExploreNodePairSecond[index] = node.second;
}
}
return std::make_pair(toExploreNodePairFirst, toExploreNodePairSecond);
}*/
// Takes a pair of nodes, where each member of pair comes from a different
// graph and calculate a vector of number of arcs in the cross product of
// arcs outgoing from each pair.
// This should be a kernel call
/*
std::tuple<std::vector<int>, std::pair<std::vector<int>, std::vector<int>>>
calculateArcCrossProductOffset(
const std::pair<std::vector<int>, std::vector<int>>& toExploreNodePair,
const GraphDataParallel& graphDP1,
const GraphDataParallel& graphDP2,
bool inOrOutArc) {
assert(toExploreNodePair.first.size() == toExploreNodePair.second.size());
std::pair<std::vector<int>, std::vector<int>> toExploreNumArcs;
toExploreNumArcs.first.resize(toExploreNodePair.first.size());
toExploreNumArcs.second.resize(toExploreNodePair.first.size());
std::vector<int> arcCrossProductOffset(toExploreNodePair.first.size());
// No dependence between iterations
for (size_t i = 0; i < toExploreNodePair.first.size(); ++i) {
int node = (toExploreNodePair.first)[i];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph1 = ((node + 1) == graphDP1.inArcOffset.size())
? graphDP1.inArcs.size()
: graphDP1.inArcOffset[node + 1];
const int outArcOffsetGraph1 = ((node + 1) == graphDP1.outArcOffset.size())
? graphDP1.outArcs.size()
: graphDP1.outArcOffset[node + 1];
const int numArcsFirst = inOrOutArc
? inArcOffsetGraph1 - graphDP1.inArcOffset[node]
: outArcOffsetGraph1 - graphDP1.outArcOffset[node];
node = (toExploreNodePair.second)[i];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph2 = ((node + 1) == graphDP2.inArcOffset.size())
? graphDP2.inArcs.size()
: graphDP2.inArcOffset[node + 1];
const int outArcOffsetGraph2 = ((node + 1) == graphDP2.outArcOffset.size())
? graphDP2.outArcs.size()
: graphDP2.outArcOffset[node + 1];
const int numArcsSecond = inOrOutArc
? inArcOffsetGraph2 - graphDP2.inArcOffset[node]
: outArcOffsetGraph2 - graphDP2.outArcOffset[node];
(toExploreNumArcs.first)[i] = numArcsFirst;
(toExploreNumArcs.second)[i] = numArcsSecond;
// Even when numArcsFirst or numArcsSecond is 0 we have to consider
// the case when the other graph has arcs with epsilon label
if (numArcsFirst != 0 && numArcsSecond != 0) {
arcCrossProductOffset[i] = numArcsFirst * numArcsSecond;
} else if (numArcsFirst != 0 && numArcsSecond == 0) {
arcCrossProductOffset[i] = numArcsFirst;
} else if (numArcsFirst == 0 && numArcsSecond != 0) {
arcCrossProductOffset[i] = numArcsSecond;
} else {
arcCrossProductOffset[i] = 0;
}
}
return std::make_tuple(arcCrossProductOffset, toExploreNumArcs);
}*/
| 4c1754ec85a59fc340a58526f0cc7e270d7077bc.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>
#include <tuple>
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include "parallel_compose.h"
#include "prefix_scan.h"
namespace gtn {
namespace detail {
namespace dataparallel {
namespace {
struct GraphDataParallelGPU {
size_t numNodes;
size_t numArcs;
// True if a node is accept or start, false otherwise
int* accept;
int* start;
// One value per node - i-th value corresponds to i-th node
// Last element is the total number of arcs, so that
// each element and its neighbor forms a range
int* inArcOffset;
int* outArcOffset;
// One value per arc
int* inArcs;
int* outArcs;
// One value per arc
// i-th value corresponds to i-th arc
int* ilabels;
int* olabels;
int* srcNodes;
int* dstNodes;
float* weights;
};
struct nodeAndArcPairGPU {
int2 nodePair;
int2 arcPair;
int2 checkEpsilonArcPair;
bool checkArcPair;
bool isValid;
};
inline int div_up(int x, int y) {
return (x + y - 1) / y;
}
__device__ __host__
inline int TwoDToOneDIndex(int n1, int n2, int n1Extent) {
assert(n1 < n1Extent);
return n1 + n2 * n1Extent;
}
__device__
inline int2 OneDToTwoDIndexGPU(int n, int n1Extent) {
assert(n1Extent > 0);
const int n2 = n / n1Extent;
const int n1 = n % n1Extent;
return make_int2(n1, n2);
}
bool checkAnyTrueGPU(const int* flags, int numFlags) {
thrust::device_ptr<const int> tPtr(flags);
const int sum = thrust::reduce(tPtr, tPtr + numFlags, int(0));
return (sum > 0);
}
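// Exclusive prefix-sum scan on the device via thrust. Returns the scanned
// array (device memory the caller must free), its element count, and the
// total sum of the input; when appendSum is true the total is also appended
// as the last element so the result can serve as an offset/range table.
// Example: input [2, 0, 3] with appendSum=true -> output [0, 2, 2, 5], sum 5.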
std::tuple<int*, size_t, int> prefixSumScanGPU(const int* input, size_t numElts, bool appendSum) {
const size_t scanNumElts = appendSum ? numElts + 1 : numElts;
int *output;
cudaMalloc((void **)(&(output)), sizeof(int) * scanNumElts);
cudaMemcpy((void *)(output), (void *)(input), sizeof(int) * numElts, cudaMemcpyDeviceToDevice);
int sum = 0;
if (numElts > 0) {
thrust::device_ptr<int> tPtr(output);
thrust::exclusive_scan(tPtr, tPtr + numElts, tPtr);
int lastElementInput;
int lastElementOutput;
cudaMemcpy((void *)(&lastElementInput), (void *)(&(input[numElts-1])), sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(&lastElementOutput), (void *)(&(output[numElts-1])), sizeof(int), cudaMemcpyDeviceToHost);
sum = lastElementInput + lastElementOutput;
}
if (appendSum) {
assert(scanNumElts > 0);
cudaMemcpy((void *)(&(output[scanNumElts-1])), (void *)(&sum), sizeof(int), cudaMemcpyHostToDevice);
}
return std::make_tuple(output, scanNumElts, sum);
}
// Map a thread id to its corresponding node pair and arc pair.
// Also map the thread id to the two flags checkEpsilonArcPair.x and
// checkEpsilonArcPair.y. When checkEpsilonArcPair.x is set, the
// corresponding tid will check for arcs with epsilon labels in the node from
// the first graph; the same logic applies to checkEpsilonArcPair.y for the
// second graph. Search to find which node pair this tid falls into.
// Linear search for now (arcCrossProductOffset is sorted by definition).
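// Example: with arcCrossProductOffset = [0, 6, 6, 10] (three node pairs
// contributing 6, 0 and 4 arcs), tid = 7 lands in the third range with
// idx = 1. If toExploreNumArcsFirst[2] = 2 and toExploreNumArcsSecond[2] = 2,
// then arcPair = (1, 0), checkEpsilonArcPair.x is set (first row) and
// checkEpsilonArcPair.y is not (idx % 2 != 0).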
__device__
nodeAndArcPairGPU computeNodeAndArcPair(
int tid,
size_t numArcCrossProductOffset,
const int* arcCrossProductOffset,
const int* toExploreNumArcsFirst,
const int* toExploreNumArcsSecond,
const int* toExploreNodePairFirst,
const int* toExploreNodePairSecond) {
nodeAndArcPairGPU result;
result.checkArcPair = false;
result.checkEpsilonArcPair = make_int2(false, false);
result.isValid = false;
// There should be at least two values to form a range
assert(numArcCrossProductOffset >= 2);
for (size_t i = 0; i < numArcCrossProductOffset - 1; ++i) {
const int lVal = arcCrossProductOffset[i];
const int rVal = arcCrossProductOffset[i + 1];
if ((lVal <= tid) && (tid < rVal)) {
result.isValid = true;
result.nodePair = make_int2(
toExploreNodePairFirst[i], toExploreNodePairSecond[i]);
// The range of idx is from
// [0, toExploreNumArcsFirst[i] * toExploreNumArcsSecond[i])
const int idx = tid - lVal;
const int numArcs = rVal - lVal;
assert(idx >= 0);
assert(idx < numArcs);
assert(numArcs > 0);
const int arcProd =
toExploreNumArcsFirst[i] * toExploreNumArcsSecond[i];
if (numArcs == arcProd) {
result.checkArcPair = true;
// We map the tids to 2D grid where the
// x-axis is toExploreNumArcsFirst[i] (row)
// y-axis is toExploreNumArcsSecond[i] (column)
assert(toExploreNumArcsFirst[i] > 0);
result.arcPair = make_int2(
idx % toExploreNumArcsFirst[i],
idx / toExploreNumArcsFirst[i]);
// Pick the tids from the first row since we need only one
// tid per arc of the node from the first graph to check for
// epsilon
if (idx < toExploreNumArcsFirst[i]) {
result.checkEpsilonArcPair.x = true;
}
// Pick the tids from the first column since we need only one
// tid per arc of the node from the second graph to check for
// epsilon
if ((idx % toExploreNumArcsFirst[i]) == 0) {
result.checkEpsilonArcPair.y = true;
}
} else if ((arcProd == 0) && (numArcs == toExploreNumArcsFirst[i])) {
// TODO: Likely not the brightest idea to use -1 as sentinel
result.arcPair = make_int2(idx, -1);
result.checkEpsilonArcPair.x = true;
} else if ((arcProd == 0) && (numArcs == toExploreNumArcsSecond[i])) {
// TODO: Likely not the brightest idea to use -1 as sentinel
result.arcPair = make_int2(-1, idx);
result.checkEpsilonArcPair.y = true;
}
break;
}
}
return result;
}
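// One thread per to-explore node pair: computes the number of in (or out,
// per inOrOutArc) arcs of each member node in its own graph and writes the
// size of their arc cross product; when one side has zero arcs the other
// side's count is kept so that epsilon arcs can still be checked.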
__global__
void calculateArcCrossProductOffsetKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
int* toExploreNumArcsFirstGPU,
int* toExploreNumArcsSecondGPU,
int* arcCrossProductOffsetGPU,
size_t numToExploreNodePair,
bool inOrOutArc) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numToExploreNodePair) {
int node = toExploreNodePairFirstGPU[gTid];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph1 = ((node + 1) == graphDP1GPU.numNodes)
? graphDP1GPU.numArcs
: graphDP1GPU.inArcOffset[node + 1];
const int outArcOffsetGraph1 = ((node + 1) == graphDP1GPU.numNodes)
? graphDP1GPU.numArcs
: graphDP1GPU.outArcOffset[node + 1];
const int numArcsFirst = inOrOutArc
? inArcOffsetGraph1 - graphDP1GPU.inArcOffset[node]
: outArcOffsetGraph1 - graphDP1GPU.outArcOffset[node];
node = toExploreNodePairSecondGPU[gTid];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph2 = ((node + 1) == graphDP2GPU.numNodes)
? graphDP2GPU.numArcs
: graphDP2GPU.inArcOffset[node + 1];
const int outArcOffsetGraph2 = ((node + 1) == graphDP2GPU.numNodes)
? graphDP2GPU.numArcs
: graphDP2GPU.outArcOffset[node + 1];
const int numArcsSecond = inOrOutArc
? inArcOffsetGraph2 - graphDP2GPU.inArcOffset[node]
: outArcOffsetGraph2 - graphDP2GPU.outArcOffset[node];
toExploreNumArcsFirstGPU[gTid] = numArcsFirst;
toExploreNumArcsSecondGPU[gTid] = numArcsSecond;
// Even when numArcsFirst or numArcsSecond is 0 we have to consider
// the case when the other graph has arcs with epsilon label
if (numArcsFirst != 0 && numArcsSecond != 0) {
arcCrossProductOffsetGPU[gTid] = numArcsFirst * numArcsSecond;
} else if (numArcsFirst != 0 && numArcsSecond == 0) {
arcCrossProductOffsetGPU[gTid] = numArcsFirst;
} else if (numArcsFirst == 0 && numArcsSecond != 0) {
arcCrossProductOffsetGPU[gTid] = numArcsSecond;
} else {
arcCrossProductOffsetGPU[gTid] = 0;
}
}
}
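// Illustrative note: the per-pair count written above comes straight from the
// CSR-style offset arrays. For example, if graph 1 has inArcOffset[node] == 5 and
// inArcOffset[node + 1] == 8, that node has 3 in-arcs; if the paired node of graph 2
// has 0 in-arcs, the pair still contributes 3 to arcCrossProductOffsetGPU so that any
// epsilon-labelled arcs of graph 1 are still examined.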
// Takes a pair of nodes, where each member of the pair comes from a different
// graph, and calculates a vector with the number of arcs in the cross product of
// the arcs incident on each pair (in-arcs or out-arcs depending on inOrOutArc).
// The counting itself runs as a kernel call (calculateArcCrossProductOffsetKernel).
std::tuple<int*, int*, int*>
calculateArcCrossProductOffsetGPU(
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
size_t numToExploreNodePair,
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
bool inOrOutArc) {
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
int* arcCrossProductOffsetGPU;
cudaMalloc((void **)(&(toExploreNumArcsFirstGPU)), sizeof(int) * numToExploreNodePair);
cudaMalloc((void **)(&(toExploreNumArcsSecondGPU)), sizeof(int) * numToExploreNodePair);
cudaMalloc((void **)(&(arcCrossProductOffsetGPU)), sizeof(int) * numToExploreNodePair);
const int NT = 128;
const int gridSize = div_up(numToExploreNodePair, NT);
calculateArcCrossProductOffsetKernel<<<gridSize, NT, 0, 0>>>(
graphDP1GPU, graphDP2GPU, toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU, arcCrossProductOffsetGPU,
numToExploreNodePair, inOrOutArc);
return std::make_tuple(arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU);
}
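// Usage sketch, mirroring what compose() below does with the returned buffers: the
// per-pair counts are turned into offsets with an exclusive prefix-sum scan, e.g.
//   int* offsetsGPU; size_t numOffsets; int totalArcs;
//   std::tie(offsetsGPU, numOffsets, totalArcs) =
//       prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
// totalArcs then sizes the per-arc kernels and numOffsets == numToExploreNodePair + 1.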
// This function needs to be thread safe since multiple threads
// can call it concurrently and they may overlap on curIdx and dstIdx
__device__
void calculateNumArcsAndNodesToExplore(
int curIdx,
int dstIdx,
const int* reachable,
int* newNodes,
int* toExplore,
int* numOutArcs,
int* numInArcs) {
if (reachable[dstIdx]) {
// Atomic test and set for newNodes
/*
int oldVal = newNodes[dstIdx];
if (!newNodes[dstIdx]) {
newNodes[dstIdx] = true;
}*/
int oldVal = atomicCAS(&(newNodes[dstIdx]), false, true);
if (!oldVal) {
toExplore[dstIdx] = true;
}
// These are atomic increments
// numOutArcs[curIdx]++;
// numInArcs[dstIdx]++;
atomicAdd(&(numOutArcs[curIdx]), 1);
atomicAdd(&(numInArcs[dstIdx]), 1);
// printf("cidx %d didx %d\n", curIdx, dstIdx);
// printf("no %d ni %d\n", numOutArcs[curIdx], numInArcs[dstIdx]);
}
}
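// Note on the atomic test-and-set idiom used above: atomicCAS(&flag, false, true)
// returns the previous value, so exactly one of the colliding threads observes 0 and
// gets to add dstIdx to the next frontier:
//   int oldVal = atomicCAS(&(newNodes[dstIdx]), false, true);
//   if (!oldVal) { toExplore[dstIdx] = true; }  // first writer only
// The arc counters, by contrast, must count every caller, hence the plain atomicAdd.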
// This function needs to be thread safe since multiple threads
// can call it concurrently
__device__
void generateCombinedGraphNodesAndArcs(
int dstIdx,
int curIdx,
const int2& arcPair,
const int2& dstNodeStartAndAccept,
const int* reachable,
const int* newNodesOffset,
int* newNodesVisited,
int* toExplore,
int* gradInfoFirst,
int* gradInfoSecond,
GraphDataParallelGPU& newGraphDP,
int ilabel,
int olabel,
float weight) {
if (reachable[dstIdx]) {
// Atomic test and set for newNodesVisited
/*
int oldVal = newNodesVisited[dstIdx];
if (!newNodesVisited[dstIdx]) {
newNodesVisited[dstIdx] = true;
}*/
int oldVal = atomicCAS(&(newNodesVisited[dstIdx]), false, true);
if (!oldVal) {
toExplore[dstIdx] = true;
}
// Set accept and start nodes
// I think I only need it for dst nodes and src nodes
// Note: Multiple threads can have the same dstIdx and write to the same
// location and collide. This _should_ be fine since they are going
// to write the same value
newGraphDP.start[newNodesOffset[dstIdx]] = dstNodeStartAndAccept.x;
newGraphDP.accept[newNodesOffset[dstIdx]] = dstNodeStartAndAccept.y;
// Both of these increments are atomic
// int inArcIdx = newGraphDP.inArcOffset[newNodesOffset[dstIdx]]++;
// int outArcIdx = newGraphDP.outArcOffset[newNodesOffset[curIdx]]++;
int inArcIdx = atomicAdd(&(newGraphDP.inArcOffset[newNodesOffset[dstIdx]]), 1);
int outArcIdx = atomicAdd(&(newGraphDP.outArcOffset[newNodesOffset[curIdx]]), 1);
// printf("dstIdx %d curIdx %d\n", dstIdx, curIdx);
// printf("inArcIdx %d outArcIdx %d\n", inArcIdx, outArcIdx);
// outArcIdx is also the arc identifier
newGraphDP.outArcs[outArcIdx] = outArcIdx;
newGraphDP.inArcs[inArcIdx] = outArcIdx;
// Fill in everything else for this arc
newGraphDP.ilabels[outArcIdx] = ilabel;
newGraphDP.olabels[outArcIdx] = olabel;
newGraphDP.srcNodes[outArcIdx] = newNodesOffset[curIdx];
newGraphDP.dstNodes[outArcIdx] = newNodesOffset[dstIdx];
newGraphDP.weights[outArcIdx] = weight;
// printf("ilabels %d olabels %d srcNodes %d dstNodes %d weights %f\n",
// newGraphDP.ilabels[outArcIdx], newGraphDP.olabels[outArcIdx],
// newGraphDP.srcNodes[outArcIdx], newGraphDP.dstNodes[outArcIdx],
// newGraphDP.weights[outArcIdx]);
gradInfoFirst[outArcIdx] = arcPair.x;
gradInfoSecond[outArcIdx] = arcPair.y;
}
}
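// Note: the atomicAdd on inArcOffset/outArcOffset above doubles as a per-node
// insertion cursor; each caller claims one arc slot. This mutates the offset
// arrays, which is why compose() re-copies the pristine exclusive-scan offsets from
// inArcOffsetGPU/outArcOffsetGPU once the arc-generation loop has finished.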
__global__
void convertToNodePairKernel(
const int* flagsGPU,
const int* indicesGPU,
int* toExploreNodePairFirstGPU,
int* toExploreNodePairSecondGPU,
int extent,
size_t numFlags,
size_t numValidNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numFlags) {
if (flagsGPU[gTid] == true) {
const int index = indicesGPU[gTid];
assert(index >= 0);
assert(index < numValidNodes);
int2 node = OneDToTwoDIndexGPU(gTid, extent);
toExploreNodePairFirstGPU[index] = node.x;
toExploreNodePairSecondGPU[index] = node.y;
}
}
}
// Convert a bool array to (first, second) node pairs for the true flags
std::tuple<int*, int*, size_t> convertToNodePairGPU(
const int* flagsGPU,
size_t numFlags,
int extent) {
int* indicesGPU;
size_t numIndices;
size_t numValidNodes;
std::tie(indicesGPU, numIndices, numValidNodes) = prefixSumScanGPU(flagsGPU, numFlags, false);
assert(numFlags == numIndices);
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
cudaMalloc((void **)(&(toExploreNodePairFirstGPU)), sizeof(int) * numValidNodes);
cudaMalloc((void **)(&(toExploreNodePairSecondGPU)), sizeof(int) * numValidNodes);
const int NT = 128;
const int gridSize = div_up(numFlags, NT);
convertToNodePairKernel<<<gridSize, NT, 0, 0>>>(flagsGPU, indicesGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
extent, numFlags, numValidNodes);
cudaFree(indicesGPU);
return std::make_tuple(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numValidNodes);
}
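// Worked example, assuming OneDToTwoDIndexGPU follows the same row-major convention
// as the commented-out CPU OneDToTwoDIndex near the end of this file: with
// extent == 2 and flags == {0, 1, 0, 1}, the exclusive scan yields indices
// {0, 0, 1, 1} and numValidNodes == 2, so gTid 1 writes the pair (1, 0) to slot 0
// and gTid 3 writes the pair (1, 1) to slot 1.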
__device__
int2 getStartAndAccept(
const GraphDataParallelGPU& graphDP1,
const GraphDataParallelGPU& graphDP2,
const int2& dstNodePair) {
int2 dstNodeStartAndAccept = make_int2(
graphDP1.start[dstNodePair.x] && graphDP2.start[dstNodePair.y],
graphDP1.accept[dstNodePair.x] &&
graphDP2.accept[dstNodePair.y]);
return dstNodeStartAndAccept;
}
GraphDataParallelGPU copyToGPU(const GraphDataParallel& graphDP) {
GraphDataParallelGPU graphDPGPU;
graphDPGPU.numNodes = graphDP.inArcOffset.size();
graphDPGPU.numArcs = graphDP.inArcs.size();
assert(graphDP.accept.size() == graphDPGPU.numNodes);
assert(graphDP.start.size() == graphDPGPU.numNodes);
assert(graphDP.inArcOffset.size() == graphDPGPU.numNodes);
assert(graphDP.outArcOffset.size() == graphDPGPU.numNodes);
assert(graphDP.inArcs.size() == graphDPGPU.numArcs);
assert(graphDP.outArcs.size() == graphDPGPU.numArcs);
assert(graphDP.ilabels.size() == graphDPGPU.numArcs);
assert(graphDP.olabels.size() == graphDPGPU.numArcs);
assert(graphDP.srcNodes.size() == graphDPGPU.numArcs);
assert(graphDP.dstNodes.size() == graphDPGPU.numArcs);
assert(graphDP.weights.size() == graphDPGPU.numArcs);
// Allocate memory
cudaMalloc((void **)(&(graphDPGPU.accept)), sizeof(int) * graphDPGPU.numNodes);
cudaMalloc((void **)(&(graphDPGPU.start)), sizeof(int) * graphDPGPU.numNodes);
cudaMalloc((void **)(&(graphDPGPU.inArcOffset)), sizeof(int) * graphDPGPU.numNodes);
cudaMalloc((void **)(&(graphDPGPU.outArcOffset)), sizeof(int) * graphDPGPU.numNodes);
cudaMalloc((void **)(&(graphDPGPU.inArcs)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.outArcs)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.ilabels)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.olabels)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.srcNodes)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.dstNodes)), sizeof(int) * graphDPGPU.numArcs);
cudaMalloc((void **)(&(graphDPGPU.weights)), sizeof(float) * graphDPGPU.numArcs);
// Copy
cudaMemcpy((void *)(graphDPGPU.accept), (void *)(graphDP.accept.data()), sizeof(int) * graphDPGPU.numNodes, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.start), (void *)(graphDP.start.data()), sizeof(int) * graphDPGPU.numNodes, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.inArcOffset), (void *)(graphDP.inArcOffset.data()), sizeof(int) * graphDPGPU.numNodes, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.outArcOffset), (void *)(graphDP.outArcOffset.data()), sizeof(int) * graphDPGPU.numNodes, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.inArcs), (void *)(graphDP.inArcs.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.outArcs), (void *)(graphDP.outArcs.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.ilabels), (void *)(graphDP.ilabels.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.olabels), (void *)(graphDP.olabels.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.srcNodes), (void *)(graphDP.srcNodes.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.dstNodes), (void *)(graphDP.dstNodes.data()), sizeof(int) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
cudaMemcpy((void *)(graphDPGPU.weights), (void *)(graphDP.weights.data()), sizeof(float) * graphDPGPU.numArcs, cudaMemcpyHostToDevice);
return graphDPGPU;
}
__global__
void findReachableKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
int* toExploreGPU,
int* reachableGPU,
int* epsilonMatchedGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
// printf("tid = %d, valid = %d\n", gTid, result.isValid);
// Does this node pair match?
if (result.isValid) {
int inArcOffset = graphDP1GPU.inArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.inArcs[inArcOffset + result.arcPair.x];
inArcOffset = graphDP2GPU.inArcOffset[result.nodePair.y];
const int secondArcIdx = graphDP2GPU.inArcs[inArcOffset + result.arcPair.y];
// printf("tid = %d, cp = %d\n", gTid, result.checkArcPair);
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
const int idx = TwoDToOneDIndex(
graphDP1GPU.srcNodes[firstArcIdx],
graphDP2GPU.srcNodes[secondArcIdx],
numNodesFirst);
// printf("tid = %d, idx = %d\n", gTid, idx);
if (graphDP1GPU.olabels[firstArcIdx] == epsilon) {
epsilonMatchedGPU[idx] = true;
}
// idx may not be unique amongst all threads.
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
// printf("r %d t %d \n", reachableGPU[idx], toExploreGPU[idx]);
}
// Only valid for arcs incoming to node from first graph
if (result.checkEpsilonArcPair.x &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
const int idx = TwoDToOneDIndex(
graphDP1GPU.srcNodes[firstArcIdx], result.nodePair.y, numNodesFirst);
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
}
// Only valid for arcs incoming to node from second graph
if (result.checkEpsilonArcPair.y &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
const int idx = TwoDToOneDIndex(
result.nodePair.x, graphDP2GPU.srcNodes[secondArcIdx], numNodesFirst);
/*
int oldVal = reachableGPU[idx];
if (!reachableGPU[idx]) {
reachableGPU[idx] = true;
}*/
int oldVal = atomicCAS(&(reachableGPU[idx]), false, true);
if (!oldVal) {
toExploreGPU[idx] = true;
}
}
}
}
}
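// Summary of the kernel above: each tid inspects one candidate pairing of in-arcs of
// a frontier node pair. When the output label of the graph-1 arc equals the input
// label of the graph-2 arc (or an epsilon-labelled arc is taken on one side only),
// the corresponding pair of source nodes is marked reachable and queued for the next
// frontier. epsilonMatchedGPU additionally flags source pairs reached through a pair
// of arcs that matched on epsilon; steps 2 and 4 consult it when deciding whether to
// follow single-sided epsilon arcs.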
__global__
void computeValidNodeAndArcKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
const int* reachableGPU,
const int* epsilonMatchedGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
int* toExploreGPU,
int* newNodesGPU,
int* numInArcsGPU,
int* numOutArcsGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
// Map tid to corresponding node and arc pair
// Search to find which node pair this tid will fall into
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
if (result.isValid) {
int outArcOffset = graphDP1GPU.outArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.outArcs[outArcOffset + result.arcPair.x];
outArcOffset = graphDP2GPU.outArcOffset[result.nodePair.y];
const int secondArcIdx =
graphDP2GPU.outArcs[outArcOffset + result.arcPair.y];
const bool epsilonMatch = epsilonMatchedGPU[TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst)];
// Does this node pair match?
// Skip epsilon matches
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
const int dstIdx = TwoDToOneDIndex(
graphDP1GPU.dstNodes[firstArcIdx],
graphDP2GPU.dstNodes[secondArcIdx],
numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1a dst %d cur %d\n", dstIdx, curIdx);
// Any epsilon match between two arcs outgoing from this node pair was already
// recorded in epsilonMatchedGPU by findReachableKernel; epsilon-labelled matches are
// skipped here and handled by the dedicated epsilon branches below.
if (graphDP1GPU.olabels[firstArcIdx] != epsilon) {
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
}
if (result.checkEpsilonArcPair.x &&
(!epsilonMatch || graphDP2GPU.accept[result.nodePair.y] ||
!graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
const int dstIdx = TwoDToOneDIndex(
graphDP1GPU.dstNodes[firstArcIdx], result.nodePair.y, numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1b dst %d cur %d\n", dstIdx, curIdx);
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
if (result.checkEpsilonArcPair.y &&
(!epsilonMatch || graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
const int dstIdx = TwoDToOneDIndex(
result.nodePair.x, graphDP2GPU.dstNodes[secondArcIdx], numNodesFirst);
const int curIdx =
TwoDToOneDIndex(result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krnl 1c dst %d cur %d\n", dstIdx, curIdx);
calculateNumArcsAndNodesToExplore(
curIdx,
dstIdx,
reachableGPU,
newNodesGPU,
toExploreGPU,
numOutArcsGPU,
numInArcsGPU);
}
}
}
}
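// Note: this kernel only counts. It flags the node pairs the combined graph will keep
// (newNodesGPU) and accumulates per-node in/out arc counts with atomics; step 3 turns
// those counts into offsets with a prefix-sum scan, and generateNodeAndArcKernel then
// repeats the same traversal to actually materialize the arcs into those slots.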
__global__
void generateNodeAndArcKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* arcCrossProductOffsetGPU,
const int* toExploreNumArcsFirstGPU,
const int* toExploreNumArcsSecondGPU,
const int* toExploreNodePairFirstGPU,
const int* toExploreNodePairSecondGPU,
const int* reachableGPU,
const int* epsilonMatchedGPU,
int numNodesFirst,
int totalArcs,
size_t numArcCrossProductOffset,
GraphDataParallelGPU newGraphDPGPU,
int* toExploreGPU,
int* gradInfoFirstGPU,
int* gradInfoSecondGPU,
int* newNodesOffsetGPU,
int* newNodesVisitedGPU
) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < totalArcs) {
// Map tid to corresponding node and arc pair
// Search to find which node pair this tid will fall into
nodeAndArcPairGPU result =
computeNodeAndArcPair(
gTid, numArcCrossProductOffset, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU);
if (result.isValid) {
int outArcOffset = graphDP1GPU.outArcOffset[result.nodePair.x];
const int firstArcIdx = graphDP1GPU.outArcs[outArcOffset + result.arcPair.x];
outArcOffset = graphDP2GPU.outArcOffset[result.nodePair.y];
const int secondArcIdx =
graphDP2GPU.outArcs[outArcOffset + result.arcPair.y];
const bool epsilonMatch = epsilonMatchedGPU[TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst)];
// Does this node pair match?
if (result.checkArcPair &&
(graphDP1GPU.olabels[firstArcIdx] == graphDP2GPU.ilabels[secondArcIdx])) {
int2 dstNodePair = make_int2(
graphDP1GPU.dstNodes[firstArcIdx], graphDP2GPU.dstNodes[secondArcIdx]);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2a dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
// Any epsilon match between two arcs outgoing from this node pair was already
// recorded in epsilonMatchedGPU by findReachableKernel; epsilon-labelled matches are
// skipped here and handled by the dedicated epsilon branches below.
if (graphDP1GPU.olabels[firstArcIdx] != epsilon) {
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(firstArcIdx, secondArcIdx),
dstNodeStartAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
graphDP1GPU.ilabels[firstArcIdx],
graphDP2GPU.olabels[secondArcIdx],
graphDP1GPU.weights[firstArcIdx] + graphDP2GPU.weights[secondArcIdx]);
}
}
// The epsilon matches
if (result.checkEpsilonArcPair.x &&
(!epsilonMatch || graphDP2GPU.accept[result.nodePair.y] ||
!graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP1GPU.olabels[firstArcIdx] == epsilon)) {
// When arc from first node has epsilon label then we consider
// second node
int2 dstNodePair = make_int2(
graphDP1GPU.dstNodes[firstArcIdx], result.nodePair.y);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2b dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(firstArcIdx, -1),
dstNodeStartAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
graphDP1GPU.ilabels[firstArcIdx],
epsilon,
graphDP1GPU.weights[firstArcIdx]);
}
// The epsilon matches
if (result.checkEpsilonArcPair.y &&
(!epsilonMatch || graphDP1GPU.accept[result.nodePair.x]) &&
(graphDP2GPU.ilabels[secondArcIdx] == epsilon)) {
// When arc from second node has epsilon label then we consider
// first node
int2 dstNodePair = make_int2(
result.nodePair.x, graphDP2GPU.dstNodes[secondArcIdx]);
const int dstIdx = TwoDToOneDIndex(
dstNodePair.x, dstNodePair.y, numNodesFirst);
const int curIdx = TwoDToOneDIndex(
result.nodePair.x, result.nodePair.y, numNodesFirst);
// printf("krn2c dstIdx=%d curIdx=%d\n", dstIdx, curIdx);
const int2 dstNodeStartAndAccept =
getStartAndAccept(graphDP1GPU, graphDP2GPU, dstNodePair);
generateCombinedGraphNodesAndArcs(
dstIdx,
curIdx,
make_int2(-1, secondArcIdx),
dstNodeStartAndAccept,
reachableGPU,
newNodesOffsetGPU,
newNodesVisitedGPU,
toExploreGPU,
gradInfoFirstGPU,
gradInfoSecondGPU,
newGraphDPGPU,
epsilon,
graphDP2GPU.olabels[secondArcIdx],
graphDP2GPU.weights[secondArcIdx]);
}
}
}
}
__global__
void calculateNumArcsKernel(
const int* flagsGPU,
const int* indicesGPU,
const int* inputInArcsGPU,
const int* inputOutArcsGPU,
int* outputInArcsGPU,
int* outputOutArcsGPU,
size_t numFlags,
size_t numValidNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numFlags) {
if (flagsGPU[gTid] == true) {
const int index = indicesGPU[gTid];
assert(index >= 0);
assert(index < numValidNodes);
outputInArcsGPU[index] = inputInArcsGPU[gTid];
outputOutArcsGPU[index] = inputOutArcsGPU[gTid];
}
}
}
__global__
void fourthPassInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* reachableGPU,
const int* newNodesOffsetGPU,
GraphDataParallelGPU newGraphDPGPU,
int* toExploreGPU,
int* newNodesVisitedGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.start[indices.x] && graphDP2GPU.start[indices.y]) {
if (reachableGPU[gTid]) {
toExploreGPU[gTid] = true;
newNodesVisitedGPU[gTid] = true;
newGraphDPGPU.start[newNodesOffsetGPU[gTid]] = true;
newGraphDPGPU.accept[newNodesOffsetGPU[gTid]] =
graphDP1GPU.accept[indices.x] && graphDP2GPU.accept[indices.y];
}
}
}
}
__global__
void secondPassInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
const int* reachableGPU,
int* toExploreGPU,
int* newNodesGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.start[indices.x] && graphDP2GPU.start[indices.y]) {
if (reachableGPU[gTid]) {
toExploreGPU[gTid] = true;
newNodesGPU[gTid] = true;
}
}
}
}
__global__
void findReachableInitInitKernel(
const GraphDataParallelGPU graphDP1GPU,
const GraphDataParallelGPU graphDP2GPU,
int* reachableGPU,
int* toExploreGPU,
int numNodesFirst,
int numNodes) {
const int gTid = blockIdx.x * blockDim.x + threadIdx.x;
if (gTid < numNodes) {
int2 indices = OneDToTwoDIndexGPU(gTid, numNodesFirst);
if (graphDP1GPU.accept[indices.x] && graphDP2GPU.accept[indices.y]) {
toExploreGPU[gTid] = true;
reachableGPU[gTid] = true;
}
}
}
} // namespace
Graph compose(const Graph& first, const Graph& second) {
GraphDataParallel graphDP1, graphDP2;
// Convert from AOS to SOA
graphDP1 = convertToDataParallel(first);
graphDP2 = convertToDataParallel(second);
// Copy to GPU
GraphDataParallelGPU graphDP1GPU, graphDP2GPU;
graphDP1GPU = copyToGPU(graphDP1);
graphDP2GPU = copyToGPU(graphDP2);
const int numAllPairNodes = first.numNodes() * second.numNodes();
const int numNodesFirst = first.numNodes();
// Fixed number of CUDA threads and stream for all kernels
const int NT = 128;
//////////////////////////////////////////////////////////////////////////
// Step 1: Data parallel findReachable
//////////////////////////////////////////////////////////////////////////
int* reachableGPU;
int* epsilonMatchedGPU;
int* toExploreGPU;
cudaMalloc((void **)(&reachableGPU), sizeof(int) * numAllPairNodes);
cudaMalloc((void **)(&epsilonMatchedGPU), sizeof(int) * numAllPairNodes);
cudaMalloc((void **)(&toExploreGPU), sizeof(int) * numAllPairNodes);
cudaMemset((void*)reachableGPU, false, sizeof(int) * numAllPairNodes);
cudaMemset((void*)epsilonMatchedGPU, false, sizeof(int) * numAllPairNodes);
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
findReachableInitInitKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU,
reachableGPU, toExploreGPU, numNodesFirst, numAllPairNodes);
}
// std::cout << "num all pair nodes " << numAllPairNodes << std::endl;
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, true);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
cudaFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
findReachableKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU, arcCrossProductOffsetGPU,
toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU, toExploreNodePairFirstGPU,
toExploreNodePairSecondGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
toExploreGPU, reachableGPU, epsilonMatchedGPU);
}
cudaFree(toExploreNodePairFirstGPU);
cudaFree(toExploreNodePairSecondGPU);
cudaFree(arcCrossProductOffsetGPU);
cudaFree(toExploreNumArcsFirstGPU);
cudaFree(toExploreNumArcsSecondGPU);
} // end while for findReachable
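// At this point reachableGPU marks every node pair from which an accepting pair can
// be reached (the traversal above starts from accept x accept pairs and walks in-arcs
// backwards), and epsilonMatchedGPU marks pairs that have a matching epsilon arc pair.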
//////////////////////////////////////////////////////////////////////////
// Step 2: Compute a) valid nodes in combined graph
// b) Number of in and out arcs in combined graph
// This information is used to generate offsets for nodes and arcs
// in the combined graph
//////////////////////////////////////////////////////////////////////////
int* newNodesGPU;
int* numOutArcsGPU;
int* numInArcsGPU;
cudaMalloc((void **)(&newNodesGPU), sizeof(int) * numAllPairNodes);
cudaMalloc((void **)(&numOutArcsGPU), sizeof(int) * numAllPairNodes);
cudaMalloc((void **)(&numInArcsGPU), sizeof(int) * numAllPairNodes);
cudaMemset((void*)newNodesGPU, false, sizeof(int) * numAllPairNodes);
cudaMemset((void*)numOutArcsGPU, 0, sizeof(int) * numAllPairNodes);
cudaMemset((void*)numInArcsGPU, 0, sizeof(int) * numAllPairNodes);
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
secondPassInitKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU, reachableGPU,
toExploreGPU, newNodesGPU, numNodesFirst, numAllPairNodes);
}
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, false);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
cudaFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
computeValidNodeAndArcKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU,
arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, reachableGPU,
epsilonMatchedGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
toExploreGPU, newNodesGPU, numInArcsGPU, numOutArcsGPU);
}
cudaFree(toExploreNodePairFirstGPU);
cudaFree(toExploreNodePairSecondGPU);
cudaFree(arcCrossProductOffsetGPU);
cudaFree(toExploreNumArcsFirstGPU);
cudaFree(toExploreNumArcsSecondGPU);
}
//////////////////////////////////////////////////////////////////////////
// Step 3: Generate offsets for nodes and arcs in combined graph
//////////////////////////////////////////////////////////////////////////
GraphDataParallelGPU newGraphDPGPU;
int totalNodes;
int* newNodesOffsetGPU;
size_t numElements;
std::tie(newNodesOffsetGPU, numElements, totalNodes) = prefixSumScanGPU(newNodesGPU, numAllPairNodes, false);
assert(numElements == numAllPairNodes);
newGraphDPGPU.numNodes = totalNodes;
cudaMalloc((void **)(&(newGraphDPGPU.start)), sizeof(int) * totalNodes);
cudaMalloc((void **)(&(newGraphDPGPU.accept)), sizeof(int) * totalNodes);
cudaMalloc((void **)(&(newGraphDPGPU.inArcOffset)), sizeof(int) * totalNodes);
cudaMalloc((void **)(&(newGraphDPGPU.outArcOffset)), sizeof(int) * totalNodes);
// Generate offsets for nodes and arcs
{
const int NT = 128;
const int gridSize = div_up(numAllPairNodes, NT);
calculateNumArcsKernel<<<gridSize, NT, 0, 0>>>(newNodesGPU, newNodesOffsetGPU,
numInArcsGPU, numOutArcsGPU, newGraphDPGPU.inArcOffset, newGraphDPGPU.outArcOffset,
numAllPairNodes, totalNodes);
}
int totalInArcs;
int totalOutArcs;
int* inArcOffsetGPU;
int* outArcOffsetGPU;
std::tie(inArcOffsetGPU, numElements, totalInArcs) = prefixSumScanGPU(newGraphDPGPU.inArcOffset, totalNodes, false);
assert(numElements == totalNodes);
std::tie(outArcOffsetGPU, numElements, totalOutArcs) = prefixSumScanGPU(newGraphDPGPU.outArcOffset, totalNodes, false);
assert(numElements == totalNodes);
assert(totalInArcs == totalOutArcs);
newGraphDPGPU.numArcs = totalOutArcs;
cudaMalloc((void **)(&(newGraphDPGPU.inArcs)), sizeof(int) * totalInArcs);
cudaMalloc((void **)(&(newGraphDPGPU.outArcs)), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&(newGraphDPGPU.ilabels)), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&(newGraphDPGPU.olabels)), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&(newGraphDPGPU.srcNodes)), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&(newGraphDPGPU.dstNodes)), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&(newGraphDPGPU.weights)), sizeof(float) * totalOutArcs);
cudaMemcpy((void *)(newGraphDPGPU.inArcOffset), (void *)(inArcOffsetGPU), sizeof(int) * totalNodes, cudaMemcpyDeviceToDevice);
cudaMemcpy((void *)(newGraphDPGPU.outArcOffset), (void *)(outArcOffsetGPU), sizeof(int) * totalNodes, cudaMemcpyDeviceToDevice);
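// newGraphDPGPU.inArcOffset/outArcOffset now hold the exclusive-scan offsets; the
// pristine copies in inArcOffsetGPU/outArcOffsetGPU are kept around so the offsets
// can be restored after step 4, where the generation kernel increments them while
// filling in arcs.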
// std::cout << "totalInArcs " << totalInArcs << " totalOutArcs " << totalOutArcs << std::endl;
// SOA for gradInfo
std::pair<std::vector<int>, std::vector<int>> gradInfo;
gradInfo.first.resize(totalOutArcs);
gradInfo.second.resize(totalOutArcs);
int *gradInfoFirstGPU;
int *gradInfoSecondGPU;
cudaMalloc((void **)(&gradInfoFirstGPU), sizeof(int) * totalOutArcs);
cudaMalloc((void **)(&gradInfoSecondGPU), sizeof(int) * totalOutArcs);
//////////////////////////////////////////////////////////////////////////
// Step 4: Generate nodes and arcs in combined graph
//////////////////////////////////////////////////////////////////////////
int* newNodesVisitedGPU;
cudaMalloc((void **)(&newNodesVisitedGPU), sizeof(int) * numAllPairNodes);
cudaMemset((void*)newNodesVisitedGPU, false, sizeof(int) * numAllPairNodes);
// Reset to a pristine state for the next frontier to explore
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
cudaMemset((void *)(newGraphDPGPU.start), false, sizeof(int) * totalNodes);
cudaMemset((void *)(newGraphDPGPU.accept), false, sizeof(int) * totalNodes);
{
const int gridSize = div_up(numAllPairNodes, NT);
fourthPassInitKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU, reachableGPU,
newNodesOffsetGPU, newGraphDPGPU, toExploreGPU, newNodesVisitedGPU,
numNodesFirst, numAllPairNodes);
}
// This is the outer control loop that would spawn DP kernels
while(checkAnyTrueGPU(toExploreGPU, numAllPairNodes)) {
int* toExploreNodePairFirstGPU;
int* toExploreNodePairSecondGPU;
size_t numToExploreNodePair;
// Convert bits set in toExplore to node pairs
std::tie(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, numToExploreNodePair) =
convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
int* arcCrossProductIndexGPU;
int* toExploreNumArcsFirstGPU;
int* toExploreNumArcsSecondGPU;
std::tie(arcCrossProductIndexGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU) =
calculateArcCrossProductOffsetGPU(toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
numToExploreNodePair, graphDP1GPU, graphDP2GPU, false);
int* arcCrossProductOffsetGPU;
size_t numArcCrossProductOffset;
int totalArcs;
std::tie(arcCrossProductOffsetGPU, numArcCrossProductOffset, totalArcs) =
prefixSumScanGPU(arcCrossProductIndexGPU, numToExploreNodePair, true);
assert(numArcCrossProductOffset == (numToExploreNodePair + 1));
cudaFree(arcCrossProductIndexGPU);
// Reset to a pristine state for the next frontier to explore
cudaMemset((void*)toExploreGPU, false, sizeof(int) * numAllPairNodes);
if (totalArcs > 0) {
const int gridSize = div_up(totalArcs, NT);
generateNodeAndArcKernel<<<gridSize, NT, 0, 0>>>(graphDP1GPU, graphDP2GPU,
arcCrossProductOffsetGPU, toExploreNumArcsFirstGPU, toExploreNumArcsSecondGPU,
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU, reachableGPU,
epsilonMatchedGPU, numNodesFirst, totalArcs, numArcCrossProductOffset,
newGraphDPGPU, toExploreGPU, gradInfoFirstGPU, gradInfoSecondGPU,
newNodesOffsetGPU, newNodesVisitedGPU);
}
cudaFree(toExploreNodePairFirstGPU);
cudaFree(toExploreNodePairSecondGPU);
cudaFree(arcCrossProductOffsetGPU);
cudaFree(toExploreNumArcsFirstGPU);
cudaFree(toExploreNumArcsSecondGPU);
}
// Reset incremented offsets to original value
cudaMemcpy((void *)(newGraphDPGPU.inArcOffset), (void *)(inArcOffsetGPU), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToDevice);
cudaMemcpy((void *)(newGraphDPGPU.outArcOffset), (void *)(outArcOffsetGPU), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToDevice);
// Copy graph on GPU to CPU
GraphDataParallel newGraphDP;
newGraphDP.start.resize(totalNodes);
newGraphDP.accept.resize(totalNodes);
newGraphDP.inArcOffset.resize(totalNodes);
newGraphDP.outArcOffset.resize(totalNodes);
newGraphDP.inArcs.resize(totalInArcs);
newGraphDP.outArcs.resize(totalOutArcs);
newGraphDP.ilabels.resize(totalOutArcs);
newGraphDP.olabels.resize(totalOutArcs);
newGraphDP.srcNodes.resize(totalOutArcs);
newGraphDP.dstNodes.resize(totalOutArcs);
newGraphDP.weights.resize(totalOutArcs);
cudaMemcpy((void *)(newGraphDP.accept.data()), (void *)(newGraphDPGPU.accept), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.start.data()), (void *)(newGraphDPGPU.start), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.inArcOffset.data()), (void *)(newGraphDPGPU.inArcOffset), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.outArcOffset.data()), (void *)(newGraphDPGPU.outArcOffset), sizeof(int) * newGraphDPGPU.numNodes, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.inArcs.data()), (void *)(newGraphDPGPU.inArcs), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.outArcs.data()), (void *)(newGraphDPGPU.outArcs), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.ilabels.data()), (void *)(newGraphDPGPU.ilabels), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.olabels.data()), (void *)(newGraphDPGPU.olabels), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.srcNodes.data()), (void *)(newGraphDPGPU.srcNodes), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.dstNodes.data()), (void *)(newGraphDPGPU.dstNodes), sizeof(int) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(newGraphDP.weights.data()), (void *)(newGraphDPGPU.weights), sizeof(float) * newGraphDPGPU.numArcs, cudaMemcpyDeviceToHost);
assert(newGraphDPGPU.numArcs == totalOutArcs);
cudaMemcpy((void *)(gradInfo.first.data()), (void *)(gradInfoFirstGPU), sizeof(int) * totalOutArcs, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(gradInfo.second.data()), (void *)(gradInfoSecondGPU), sizeof(int) * totalOutArcs, cudaMemcpyDeviceToHost);
cudaFree(reachableGPU);
cudaFree(epsilonMatchedGPU);
cudaFree(toExploreGPU);
cudaFree(newNodesGPU);
cudaFree(numOutArcsGPU);
cudaFree(numInArcsGPU);
cudaFree(newNodesOffsetGPU);
cudaFree(inArcOffsetGPU);
cudaFree(outArcOffsetGPU);
cudaFree(gradInfoFirstGPU);
cudaFree(gradInfoSecondGPU);
cudaFree(newNodesVisitedGPU);
cudaFree(newGraphDPGPU.start);
cudaFree(newGraphDPGPU.accept);
cudaFree(newGraphDPGPU.inArcOffset);
cudaFree(newGraphDPGPU.outArcOffset);
cudaFree(newGraphDPGPU.inArcs);
cudaFree(newGraphDPGPU.outArcs);
cudaFree(newGraphDPGPU.ilabels);
cudaFree(newGraphDPGPU.olabels);
cudaFree(newGraphDPGPU.srcNodes);
cudaFree(newGraphDPGPU.dstNodes);
cudaFree(newGraphDPGPU.weights);
newGraphDPGPU.numNodes = 0;
newGraphDPGPU.numArcs = 0;
if (0)
{
std::cout << "nodes " << newGraphDP.inArcOffset.size() << std::endl;
std::cout << "nodes " << newGraphDP.outArcOffset.size() << std::endl;
std::cout << "start" << std::endl;
for (auto i : newGraphDP.start) {
std::cout << i << std::endl;
}
std::cout << "accept" << std::endl;
for (auto i : newGraphDP.accept) {
std::cout << i << std::endl;
}
std::cout << "inArcOffset" << std::endl;
for (auto i : newGraphDP.inArcOffset) {
std::cout << i << std::endl;
}
std::cout << "outArcOffset" << std::endl;
for (auto i : newGraphDP.outArcOffset) {
std::cout << i << std::endl;
}
std::cout << "inArcs" << std::endl;
for (auto i : newGraphDP.inArcs) {
std::cout << i << std::endl;
}
std::cout << "outArcs" << std::endl;
for (auto i : newGraphDP.outArcs) {
std::cout << i << std::endl;
}
std::cout << "ilabels" << std::endl;
for (auto i : newGraphDP.ilabels) {
std::cout << i << std::endl;
}
std::cout << "olabels" << std::endl;
for (auto i : newGraphDP.olabels) {
std::cout << i << std::endl;
}
std::cout << "srcNodes" << std::endl;
for (auto i : newGraphDP.srcNodes) {
std::cout << i << std::endl;
}
std::cout << "dstNodes" << std::endl;
for (auto i : newGraphDP.dstNodes) {
std::cout << i << std::endl;
}
std::cout << "weights" << std::endl;
for (auto i : newGraphDP.weights) {
std::cout << i << std::endl;
}
}
// Not needed since the CPU data is never incremented
// Shift offset values back down after adding arcs to newGraphDP
// The offset values got converted from exclusive prefix sum to inclusive
// Need to convert them back to exclusive prefix sum by starting with 0
// and shifting to right by 1
// for (int i = newGraphDP.outArcOffset.size() - 1; i >= 0; --i) {
// newGraphDP.outArcOffset[i] = i == 0 ? 0 : newGraphDP.outArcOffset[i - 1];
// newGraphDP.inArcOffset[i] = i == 0 ? 0 : newGraphDP.inArcOffset[i - 1];
// }
// Convert back and add in autograd metadata
auto nGraph = convertFromDataParallel(newGraphDP);
nGraph.setInputs({first, second});
if (0)
{
std::cout << "numNodes " << nGraph.numNodes() << std::endl;
std::cout << "accept" << std::endl;
for (auto i : nGraph.accept()) {
std::cout << i << std::endl;
}
std::cout << "start" << std::endl;
for (auto i : nGraph.start()) {
std::cout << i << std::endl;
}
std::cout << "numIn" << std::endl;
for (int i = 0; i < nGraph.numNodes(); ++i) {
std::cout << nGraph.numIn(i) << std::endl;
}
std::cout << "numOut" << std::endl;
for (int i = 0; i < nGraph.numNodes(); ++i) {
std::cout << nGraph.numOut(i) << std::endl;
}
}
// Convert gradInfo SOA to AOS
std::vector<std::pair<int, int>> gradInfoAOS;
for (int i = 0; i < gradInfo.first.size(); ++i) {
gradInfoAOS.emplace_back(gradInfo.first[i], gradInfo.second[i]);
}
// TODO eliminate this copy pasta.
auto gradFunc = [gradInfo = std::move(gradInfoAOS)](
std::vector<Graph>& inputs, Graph deltas) {
// In this case the arc's parents are always from the
// first and second input graphs respectively.
bool calcGrad1 = inputs[0].calcGrad();
bool calcGrad2 = inputs[1].calcGrad();
auto grad1 = calcGrad1 ? std::vector<float>(inputs[0].numArcs(), 0.0)
: std::vector<float>{};
auto grad2 = calcGrad2 ? std::vector<float>(inputs[1].numArcs(), 0.0)
: std::vector<float>{};
for (int i = 0; i < gradInfo.size(); i++) {
auto arcGrad = deltas.weight(i);
auto& arcs = gradInfo[i];
if (calcGrad1 && arcs.first >= 0) {
grad1[arcs.first] += arcGrad;
}
if (calcGrad2 && arcs.second >= 0) {
grad2[arcs.second] += arcGrad;
}
}
inputs[0].addGrad(std::move(grad1));
inputs[1].addGrad(std::move(grad2));
};
nGraph.setGradFunc(std::move(gradFunc));
return nGraph;
}
} // namespace dataparallel
} // namespace detail
} // namespace gtn
/*
if (0)
{
int *aCPGPU;
int *tEN1GPU;
int *tEN2GPU;
std::tie(aCPGPU, tEN1GPU, tEN2GPU) = calculateArcCrossProductOffsetGPU(
toExploreNodePairFirstGPU, toExploreNodePairSecondGPU,
toExploreNodePair.first.size(), graphDP1GPU, graphDP2GPU, true);
std::vector<int> aCP(numToExploreNodePair);
std::vector<int> tEN1(numToExploreNodePair);
std::vector<int> tEN2(numToExploreNodePair);
cudaMemcpy((void *)(aCP.data()), (void *)(aCPGPU), sizeof(int) * numToExploreNodePair, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(tEN1.data()), (void *)(tEN1GPU), sizeof(int) * numToExploreNodePair, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(tEN2.data()), (void *)(tEN2GPU), sizeof(int) * numToExploreNodePair, cudaMemcpyDeviceToHost);
assert(std::equal(arcCrossProductOffset.begin(), arcCrossProductOffset.end(), aCP.begin()));
assert(std::equal(toExploreNumArcs.first.begin(), toExploreNumArcs.first.end(), tEN1.begin()));
assert(std::equal(toExploreNumArcs.second.begin(), toExploreNumArcs.second.end(), tEN2.begin()));
cudaFree(aCPGPU);
cudaFree(tEN1GPU);
cudaFree(tEN2GPU);
}*/
/*
if(0)
{
std::vector<int> tVec(arcCrossProductOffset);
const size_t numElts = tVec.size();
int* tVecGPU;
cudaMalloc((void **)(&tVecGPU), sizeof(int) * numElts);
cudaMemcpy((void *)tVecGPU, (void *)(tVec.data()), sizeof(int) * numElts, cudaMemcpyHostToDevice);
const int totalArcs = prefixSumScan(tVec, true);
int* tVecScanGPU;
size_t tVecScanElts;
int tArcsGPU;
std::tie(tVecScanGPU, tVecScanElts, tArcsGPU) = prefixSumScanGPU(tVecGPU, numElts, true);
assert(tVec.size() == (numElts + 1));
assert(tVecScanElts == (numElts + 1));
std::vector<int> tVecNew(tVec.size());
cudaMemcpy((void *)(tVecNew.data()), (void *)(tVecScanGPU), sizeof(int) * tVecScanElts, cudaMemcpyDeviceToHost);
assert(totalArcs == tArcsGPU);
assert(std::equal(tVec.begin(), tVec.end(), tVecNew.begin()));
cudaFree(tVecGPU);
cudaFree(tVecScanGPU);
}*/
/*
if (0)
{
int* tEN1GPU;
int* tEN2GPU;
size_t nTEN;
std::tie(tEN1GPU, tEN2GPU, nTEN) = convertToNodePairGPU(toExploreGPU, numAllPairNodes, numNodesFirst);
assert(nTEN == toExploreNodePair.first.size());
std::vector<int> tEN1(nTEN);
std::vector<int> tEN2(nTEN);
cudaMemcpy((void *)(tEN1.data()), (void *)(tEN1GPU), sizeof(int) * nTEN, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)(tEN2.data()), (void *)(tEN2GPU), sizeof(int) * nTEN, cudaMemcpyDeviceToHost);
assert(std::equal(toExploreNodePair.first.begin(), toExploreNodePair.first.end(), tEN1.begin()));
assert(std::equal(toExploreNodePair.second.begin(), toExploreNodePair.second.end(), tEN2.begin()));
cudaFree(tEN1GPU);
cudaFree(tEN2GPU);
}*/
/*
inline std::pair<int, int> OneDToTwoDIndex(int n, int n1Extent) {
assert(n1Extent > 0);
const int n2 = n / n1Extent;
const int n1 = n % n1Extent;
return std::make_pair(n1, n2);
}
bool checkAnyTrue(const std::vector<int>& flags) {
// Potentially wasteful - but GPU friendly
return std::accumulate(flags.begin(), flags.end(), 0) > 0 ? true : false;
}*/
/*
// Convert int array to pairs for true flags
std::pair<std::vector<int>, std::vector<int>> convertToNodePair(
const std::vector<int>& flags,
int extent) {
std::vector<int> indices(flags);
const int numValidNodes = prefixSumScan(indices, false);
std::vector<int> toExploreNodePairFirst(numValidNodes);
std::vector<int> toExploreNodePairSecond(numValidNodes);
// No loop dependence
for (size_t i = 0; i < flags.size(); ++i) {
if (flags[i] == true) {
std::pair<int, int> node = OneDToTwoDIndex(i, extent);
const int index = indices[i];
assert(index >= 0);
assert(index < numValidNodes);
toExploreNodePairFirst[index] = node.first;
toExploreNodePairSecond[index] = node.second;
}
}
return std::make_pair(toExploreNodePairFirst, toExploreNodePairSecond);
}*/
// Takes a pair of nodes, where each member of the pair comes from a different
// graph, and calculates a vector of the number of arcs in the cross product of
// arcs outgoing from each pair.
// This should be a kernel call
/*
std::tuple<std::vector<int>, std::pair<std::vector<int>, std::vector<int>>>
calculateArcCrossProductOffset(
const std::pair<std::vector<int>, std::vector<int>>& toExploreNodePair,
const GraphDataParallel& graphDP1,
const GraphDataParallel& graphDP2,
bool inOrOutArc) {
assert(toExploreNodePair.first.size() == toExploreNodePair.second.size());
std::pair<std::vector<int>, std::vector<int>> toExploreNumArcs;
toExploreNumArcs.first.resize(toExploreNodePair.first.size());
toExploreNumArcs.second.resize(toExploreNodePair.first.size());
std::vector<int> arcCrossProductOffset(toExploreNodePair.first.size());
// No dependence between iterations
for (size_t i = 0; i < toExploreNodePair.first.size(); ++i) {
int node = (toExploreNodePair.first)[i];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph1 = ((node + 1) == graphDP1.inArcOffset.size())
? graphDP1.inArcs.size()
: graphDP1.inArcOffset[node + 1];
const int outArcOffsetGraph1 = ((node + 1) == graphDP1.outArcOffset.size())
? graphDP1.outArcs.size()
: graphDP1.outArcOffset[node + 1];
const int numArcsFirst = inOrOutArc
? inArcOffsetGraph1 - graphDP1.inArcOffset[node]
: outArcOffsetGraph1 - graphDP1.outArcOffset[node];
node = (toExploreNodePair.second)[i];
// Special case if it is the last node. Then the offset becomes
// the number of arcs
const int inArcOffsetGraph2 = ((node + 1) == graphDP2.inArcOffset.size())
? graphDP2.inArcs.size()
: graphDP2.inArcOffset[node + 1];
const int outArcOffsetGraph2 = ((node + 1) == graphDP2.outArcOffset.size())
? graphDP2.outArcs.size()
: graphDP2.outArcOffset[node + 1];
const int numArcsSecond = inOrOutArc
? inArcOffsetGraph2 - graphDP2.inArcOffset[node]
: outArcOffsetGraph2 - graphDP2.outArcOffset[node];
(toExploreNumArcs.first)[i] = numArcsFirst;
(toExploreNumArcs.second)[i] = numArcsSecond;
// Even when numArcsFirst or numArcsSecond is 0 we have to consider
// the case when the other graph has arcs with epsilon label
if (numArcsFirst != 0 && numArcsSecond != 0) {
arcCrossProductOffset[i] = numArcsFirst * numArcsSecond;
} else if (numArcsFirst != 0 && numArcsSecond == 0) {
arcCrossProductOffset[i] = numArcsFirst;
} else if (numArcsFirst == 0 && numArcsSecond != 0) {
arcCrossProductOffset[i] = numArcsSecond;
} else {
arcCrossProductOffset[i] = 0;
}
}
return std::make_tuple(arcCrossProductOffset, toExploreNumArcs);
}*/
|
cf5d95cc794fe1e405ec7628965f3375ce7ac385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file vectorAdd.cu
* @details This file describes the functions belonging to VectorADD class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "vectorAdd.h"
#include "vectorAdd_kernel.cu"
VectorADD::VectorADD()
{
h_A_VA = NULL;
h_B_VA = NULL;
h_C_VA = NULL;
d_A_VA = NULL;
d_B_VA = NULL;
d_C_VA = NULL;
n_vector_VA = 0;
numElements_VA = NULL;
max_numElements_VA = 0;
}
VectorADD::VectorADD(int size)
{
h_A_VA = NULL;
h_B_VA = NULL;
h_C_VA = NULL;
d_A_VA = NULL;
d_B_VA = NULL;
d_C_VA = NULL;
n_vector_VA = 0;
numElements_VA = NULL;
max_numElements_VA = 0;
n_vector_VA = 1;
numElements_VA = new int [n_vector_VA];
for(int i = 0; i < n_vector_VA; i++)
numElements_VA[i] = size;
max_numElements_VA = 0;
for(int i = 0; i < n_vector_VA; i++)
{
if(max_numElements_VA < numElements_VA[i])
max_numElements_VA = numElements_VA[i];
}
}
VectorADD::~VectorADD()
{
//Free host memory
if(h_A_VA!=NULL) hipHostFree(h_A_VA);
if(h_B_VA!=NULL) hipHostFree(h_B_VA);
if(h_C_VA!=NULL) hipHostFree(h_C_VA);
if(numElements_VA!=NULL) delete [] numElements_VA;
//Free device memory
if(d_A_VA!=NULL) hipFree(d_A_VA);
if(d_B_VA!=NULL) hipFree(d_B_VA);
if(d_C_VA!=NULL) hipFree(d_C_VA);
}
void VectorADD::allocHostMemory(void)
{
hipHostMalloc((void **)&h_A_VA, n_vector_VA * max_numElements_VA * sizeof(float));
hipHostMalloc((void **)&h_B_VA, n_vector_VA * max_numElements_VA * sizeof(float));
hipHostMalloc((void **)&h_C_VA, n_vector_VA * max_numElements_VA * sizeof(float));
}
void VectorADD::freeHostMemory(void)
{
if(h_A_VA!=NULL) hipHostFree(h_A_VA);
if(h_B_VA!=NULL) hipHostFree(h_B_VA);
if(h_C_VA!=NULL) hipHostFree(h_C_VA);
if(numElements_VA!=NULL) delete [] numElements_VA;
}
void VectorADD::allocDeviceMemory(void)
{
hipMalloc((void **)&d_A_VA, n_vector_VA * max_numElements_VA * sizeof(float));
hipMalloc((void **)&d_B_VA, n_vector_VA * max_numElements_VA * sizeof(float));
hipMalloc((void **)&d_C_VA, n_vector_VA * max_numElements_VA * sizeof(float));
}
void VectorADD::freeDeviceMemory(void)
{
if(d_A_VA!=NULL) hipFree(d_A_VA);
if(d_B_VA!=NULL) hipFree(d_B_VA);
if(d_C_VA!=NULL) hipFree(d_C_VA);
}
void VectorADD::generatingData(void)
{
for (int i = 0; i < n_vector_VA; i++)
{
for(int j = 0; j < numElements_VA[i]; j++)
{
h_A_VA[i*max_numElements_VA + j] = rand()/(float)RAND_MAX;
h_B_VA[i*max_numElements_VA + j] = rand()/(float)RAND_MAX;
}
}
}
void VectorADD::memHostToDeviceAsync(hipStream_t stream)
{
int idx_vector = 0;
hipMemcpyAsync(d_A_VA + idx_vector * max_numElements_VA, h_A_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(d_B_VA + idx_vector * max_numElements_VA, h_B_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), hipMemcpyHostToDevice, stream);
}
void VectorADD::memHostToDevice(void)
{
int idx_vector = 0;
hipMemcpy(d_A_VA + idx_vector * max_numElements_VA,
h_A_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B_VA + idx_vector * max_numElements_VA,
h_B_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), hipMemcpyHostToDevice);
}
void VectorADD::memDeviceToHostAsync(hipStream_t stream)
{
int idx_vector = 0;
hipMemcpyAsync(h_C_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), hipMemcpyDeviceToHost, stream);
}
void VectorADD::memDeviceToHost(void)
{
int idx_vector = 0;
hipMemcpy(h_C_VA + idx_vector * max_numElements_VA,
d_C_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), hipMemcpyDeviceToHost);
}
void VectorADD::launch_kernel_Async(hipStream_t stream)
{
int idx_vector = 0;
int threadsPerBlock = 256;
int blocksPerGrid = (ceil((float)numElements_VA[idx_vector]/(float)threadsPerBlock));
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream, d_A_VA + idx_vector * max_numElements_VA, d_B_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector], 0);
}
void VectorADD::launch_kernel(void)
{
int idx_vector = 0;
int threadsPerBlock = 256;
int blocksPerGrid = (ceil((float)numElements_VA[idx_vector]/(float)threadsPerBlock));
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A_VA + idx_vector * max_numElements_VA, d_B_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector], 0);
}
void VectorADD::checkResults(void)
{
int idx_vector = 0;
for (int i = 0; i < numElements_VA[idx_vector]; ++i)
{
//printf("i: %d - A: %f - B: %f - C: %f\n", i, h_A_vectorAdd[i], h_B_vectorAdd[i], h_C_vectorAdd[i]);
if (fabs(h_A_VA[idx_vector * max_numElements_VA + i] + h_B_VA[idx_vector * max_numElements_VA + i] - h_C_VA[idx_vector * max_numElements_VA + i]) > 1e-5)
{
printf("Result verification failed at element %d!\n", i);
}
}
}
void VectorADD::getBytesHTD(int *bytes_htd)
{
*bytes_htd = 2*(numElements_VA[0]*sizeof(float));
}
void VectorADD::getBytesDTH(int *bytes_dth)
{
*bytes_dth = numElements_VA[0]*sizeof(float);
}
void VectorADD::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
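// Worked example of the linear transfer-time model above, with hypothetical
// constants: for numElements_VA[0] == 1 << 20, bytes_HTD == 2 * 1048576 * 4 ==
// 8388608 bytes, so with LoHTD == 0.02 ms and GHTD == 1.0e-6 ms/byte the estimate is
// 0.02 + 8388608 * 1.0e-6 ~= 8.41 ms. The overlapped variants are only filled in when
// the device reports two copy engines (asyncEngineCount == 2).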
| cf5d95cc794fe1e405ec7628965f3375ce7ac385.cu | /**
* @file vectorAdd.cu
* @details This file describes the functions belonging to VectorADD class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "vectorAdd.h"
#include "vectorAdd_kernel.cu"
VectorADD::VectorADD()
{
h_A_VA = NULL;
h_B_VA = NULL;
h_C_VA = NULL;
d_A_VA = NULL;
d_B_VA = NULL;
d_C_VA = NULL;
n_vector_VA = 0;
numElements_VA = NULL;
max_numElements_VA = 0;
}
VectorADD::VectorADD(int size)
{
h_A_VA = NULL;
h_B_VA = NULL;
h_C_VA = NULL;
d_A_VA = NULL;
d_B_VA = NULL;
d_C_VA = NULL;
n_vector_VA = 0;
numElements_VA = NULL;
max_numElements_VA = 0;
n_vector_VA = 1;
numElements_VA = new int [n_vector_VA];
for(int i = 0; i < n_vector_VA; i++)
numElements_VA[i] = size;
max_numElements_VA = 0;
for(int i = 0; i < n_vector_VA; i++)
{
if(max_numElements_VA < numElements_VA[i])
max_numElements_VA = numElements_VA[i];
}
}
VectorADD::~VectorADD()
{
//Free host memory
if(h_A_VA!=NULL) cudaFreeHost(h_A_VA);
if(h_B_VA!=NULL) cudaFreeHost(h_B_VA);
if(h_C_VA!=NULL) cudaFreeHost(h_C_VA);
if(numElements_VA!=NULL) delete [] numElements_VA;
//Free device memory
if(d_A_VA!=NULL) cudaFree(d_A_VA);
if(d_B_VA!=NULL) cudaFree(d_B_VA);
if(d_C_VA!=NULL) cudaFree(d_C_VA);
}
void VectorADD::allocHostMemory(void)
{
cudaMallocHost((void **)&h_A_VA, n_vector_VA * max_numElements_VA * sizeof(float));
cudaMallocHost((void **)&h_B_VA, n_vector_VA * max_numElements_VA * sizeof(float));
cudaMallocHost((void **)&h_C_VA, n_vector_VA * max_numElements_VA * sizeof(float));
}
void VectorADD::freeHostMemory(void)
{
if(h_A_VA!=NULL) cudaFreeHost(h_A_VA);
if(h_B_VA!=NULL) cudaFreeHost(h_B_VA);
if(h_C_VA!=NULL) cudaFreeHost(h_C_VA);
if(numElements_VA!=NULL) delete [] numElements_VA;
}
void VectorADD::allocDeviceMemory(void)
{
cudaMalloc((void **)&d_A_VA, n_vector_VA * max_numElements_VA * sizeof(float));
cudaMalloc((void **)&d_B_VA, n_vector_VA * max_numElements_VA * sizeof(float));
cudaMalloc((void **)&d_C_VA, n_vector_VA * max_numElements_VA * sizeof(float));
}
void VectorADD::freeDeviceMemory(void)
{
if(d_A_VA!=NULL) cudaFree(d_A_VA);
if(d_B_VA!=NULL) cudaFree(d_B_VA);
if(d_C_VA!=NULL) cudaFree(d_C_VA);
}
void VectorADD::generatingData(void)
{
for (int i = 0; i < n_vector_VA; i++)
{
for(int j = 0; j < numElements_VA[i]; j++)
{
h_A_VA[i*max_numElements_VA + j] = rand()/(float)RAND_MAX;
h_B_VA[i*max_numElements_VA + j] = rand()/(float)RAND_MAX;
}
}
}
void VectorADD::memHostToDeviceAsync(cudaStream_t stream)
{
int idx_vector = 0;
cudaMemcpyAsync(d_A_VA + idx_vector * max_numElements_VA, h_A_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_B_VA + idx_vector * max_numElements_VA, h_B_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), cudaMemcpyHostToDevice, stream);
}
void VectorADD::memHostToDevice(void)
{
int idx_vector = 0;
cudaMemcpy(d_A_VA + idx_vector * max_numElements_VA,
h_A_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B_VA + idx_vector * max_numElements_VA,
h_B_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), cudaMemcpyHostToDevice);
}
void VectorADD::memDeviceToHostAsync(cudaStream_t stream)
{
int idx_vector = 0;
cudaMemcpyAsync(h_C_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector]*sizeof(float), cudaMemcpyDeviceToHost, stream);
}
void VectorADD::memDeviceToHost(void)
{
int idx_vector = 0;
cudaMemcpy(h_C_VA + idx_vector * max_numElements_VA,
d_C_VA + idx_vector * max_numElements_VA,
numElements_VA[idx_vector]*sizeof(float), cudaMemcpyDeviceToHost);
}
void VectorADD::launch_kernel_Async(cudaStream_t stream)
{
int idx_vector = 0;
int threadsPerBlock = 256;
int blocksPerGrid = (ceil((float)numElements_VA[idx_vector]/(float)threadsPerBlock));
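//Round the grid size up so that every element is covered by at least one thread.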
vectorAdd<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(d_A_VA + idx_vector * max_numElements_VA, d_B_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector], 0);
}
void VectorADD::launch_kernel(void)
{
int idx_vector = 0;
int threadsPerBlock = 256;
int blocksPerGrid = (ceil((float)numElements_VA[idx_vector]/(float)threadsPerBlock));
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A_VA + idx_vector * max_numElements_VA, d_B_VA + idx_vector * max_numElements_VA, d_C_VA + idx_vector * max_numElements_VA, numElements_VA[idx_vector], 0);
}
void VectorADD::checkResults(void)
{
int idx_vector = 0;
for (int i = 0; i < numElements_VA[idx_vector]; ++i)
{
//printf("i: %d - A: %f - B: %f - C: %f\n", i, h_A_vectorAdd[i], h_B_vectorAdd[i], h_C_vectorAdd[i]);
if (fabs(h_A_VA[idx_vector * max_numElements_VA + i] + h_B_VA[idx_vector * max_numElements_VA + i] - h_C_VA[idx_vector * max_numElements_VA + i]) > 1e-5)
{
printf("Result verification failed at element %d!\n", i);
}
}
}
void VectorADD::getBytesHTD(int *bytes_htd)
{
*bytes_htd = 2*(numElements_VA[0]*sizeof(float));
}
void VectorADD::getBytesDTH(int *bytes_dth)
{
*bytes_dth = numElements_VA[0]*sizeof(float);
}
void VectorADD::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
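//Linear cost model: estimated transfer time = Lo (fixed latency/overhead) + bytes * G (time per byte).
//The overlapped estimates only apply when the device has two copy engines (asyncEngineCount == 2),
//i.e. when HTD and DTH transfers can run concurrently.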
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
|
cd10bdffa9de35124b2ab98bcb158ea2b8530703.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#define IDXV(x, y, ld) ((x) + (y) * (ld))
#define block 128
#define grid 256
__global__ static void pack_matrix(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n, const int index)
{
// for(int z=0; z<n; z++)
// for(int x=0; x<n; x++)
// tmp_matrix[z][x] = device_cube[z][index][x];
// tmp_matrix[z*n+x] = device_cube[z*n*n+index*n+x];
int ivx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
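// Grid-stride loop: each thread starts at its global index and advances by the
// total number of launched threads until the whole n*n slice is covered.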
while(ivx < n*n){
int z = ivx / n;
int x = ivx - z * n;
tmp_matrix[ivx] = device_cube[z*n*n+index*n+x];
ivx += blockDim.x * gridDim.x;
}
}
__global__ static void unpack_matrix(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n, const int index)
{
// for(int z=0; z<n; z++)
// for(int x=0; x<n; x++)
// device_cube[z][index][x] = tmp_matrix[z][x];
// device_cube[z*n*n+index*n+x] = tmp_matrix[z*n+x];
int ivx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
while(ivx < n*n){
int z = ivx / n;
int x = ivx - z * n;
device_cube[z*n*n+index*n+x] = tmp_matrix[ivx];
ivx += blockDim.x * gridDim.x;
}
}
extern "C"
void call_pack(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n, const int index)
{
hipLaunchKernelGGL(( pack_matrix) , dim3(grid), dim3(block) , 0, 0, device_cube, tmp_matrix, n, index);
hipDeviceSynchronize();
}
extern "C"
void call_unpack(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n, const int index)
{
hipLaunchKernelGGL(( unpack_matrix) , dim3(grid), dim3(block) , 0, 0, tmp_matrix, device_cube, n, index);
hipDeviceSynchronize();
}
| cd10bdffa9de35124b2ab98bcb158ea2b8530703.cu | #include <cuda.h>
#define IDXV(x, y, ld) ((x) + (y) * (ld))
#define block 128
#define grid 256
__global__ static void pack_matrix(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n, const int index)
{
// for(int z=0; z<n; z++)
// for(int x=0; x<n; x++)
// tmp_matrix[z][x] = device_cube[z][index][x];
// tmp_matrix[z*n+x] = device_cube[z*n*n+index*n+x];
int ivx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
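// Grid-stride loop: each thread starts at its global index and advances by the
// total number of launched threads until the whole n*n slice is covered.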
while(ivx < n*n){
int z = ivx / n;
int x = ivx - z * n;
tmp_matrix[ivx] = device_cube[z*n*n+index*n+x];
ivx += blockDim.x * gridDim.x;
}
}
__global__ static void unpack_matrix(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n, const int index)
{
// for(int z=0; z<n; z++)
// for(int x=0; x<n; x++)
// device_cube[z][index][x] = tmp_matrix[z][x];
// device_cube[z*n*n+index*n+x] = tmp_matrix[z*n+x];
int ivx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
while(ivx < n*n){
int z = ivx / n;
int x = ivx - z * n;
device_cube[z*n*n+index*n+x] = tmp_matrix[ivx];
ivx += blockDim.x * gridDim.x;
}
}
extern "C"
void call_pack(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n, const int index)
{
pack_matrix <<< grid, block >>> (device_cube, tmp_matrix, n, index);
cudaDeviceSynchronize();
}
extern "C"
void call_unpack(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n, const int index)
{
unpack_matrix <<< grid, block >>> (tmp_matrix, device_cube, n, index);
cudaDeviceSynchronize();
}
|
3ff979cecb9d03c220d091e8fc16c34a34003324.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include "hicoo.h"
#include "mttkrp_cuda_kernels.h"
#include <inttypes.h>
int ptiMTTKRPKernelHiCOO(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiNnzIndex max_nnzb,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiIndex blength,
const int impl_num,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
int result = 0;
/* Maximum settings */
ptiIndex max_nthreads_per_block = 256;
ptiIndex max_nblocks = 32768;
ptiIndex max_R = 4;
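/* Rank-blocked kernels use at most max_R threads along the rank dimension; larger R is processed in chunks of blockDim.x inside the kernels */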
ptiIndex nthreadsx = 0;
ptiIndex nthreadsy = 0;
ptiIndex nblocks = 0;
ptiIndex shr_size = 0;
ptiNnzIndex all_nblocks = blength;
switch(nmodes) {
case 3: /* 3-D tensors */
switch(impl_num) {
case 1: // Naive, 1D
/* Set number of blocks and threads */
nthreadsx = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 2:
nthreadsy = R;
nthreadsx = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 3:
nthreadsx = R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 4:
nthreadsx = R;
if(R <= max_R)
nthreadsx = R;
else
nthreadsx = max_R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
/* Matrix blocked implementations */
case 14:
nthreadsx = R;
if(R <= max_R)
nthreadsx = R;
else
nthreadsx = max_R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
/* Shared memory for the output matrix + switch block sizes */
case 15:
nthreadsx = R;
if(R <= max_R) {
nthreadsx = R;
}
else {
nthreadsx = max_R;
}
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
shr_size = (ptiIndex)pow(2, sb_bits) * R * sizeof(ptiValue);
break;
/* Shared memory for three matrices + switch block sizes */
case 16:
nthreadsx = R;
if(R <= max_R) {
nthreadsx = R;
}
else {
nthreadsx = max_R;
}
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
shr_size = nmodes * (ptiIndex)pow(2, sb_bits) * R * sizeof(ptiValue);
break;
}
dim3 dimBlock(nthreadsx, nthreadsy);
switch(impl_num) {
case 1: // Naive, 1D
printf("\nExecute pti_MTTKRPKernelHiCOO_3D_naive (%u, %u)\n", nblocks, nthreadsx);
hipLaunchKernelGGL(( pti_MTTKRPKernelHiCOO_3D_naive), dim3(nblocks), dim3(nthreadsx), 0, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 2:
printf("\nExecute pti_MTTKRPKernelRankHiCOO_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankHiCOO_3D_naive), dim3(nblocks), dim3(dimBlock), 0, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 3:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOO_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankSplitHiCOO_3D_naive), dim3(nblocks), dim3(dimBlock), 0, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 4:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankSplitHiCOORB_3D_naive), dim3(nblocks), dim3(dimBlock), 0, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Matrix blocked implementations */
case 14:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked), dim3(nblocks), dim3(dimBlock), 0, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Shared memory for the output matrix + switch block sizes */
case 15:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM (%u, %u, %u), SM: %u bytes\n", nblocks, nthreadsx, nthreadsy, shr_size);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM), dim3(nblocks), dim3(dimBlock), shr_size, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Shared memory for three matrices + switch block sizes */
case 16:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM (%u, %u, %u), SM: %u bytes\n", nblocks, nthreadsx, nthreadsy, shr_size);
hipLaunchKernelGGL(( pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM), dim3(nblocks), dim3(dimBlock), shr_size, 0,
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
}
break;
} // End switch nmodes
result = hipDeviceSynchronize();
pti_CheckCudaError(result != 0, "CUDA HiCOO SpTns MTTKRP");
return 0;
}
/* impl_num = 01 Naive, 1-D
* Limitation: blockDim.x (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
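/* A nonzero's global index in each mode = (block index << sb_bits) + its element index within the block */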
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidx + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
for(ptiIndex r=0; r<R; ++r) {
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 02 Naive, 2-D
* Limitation: blockDim.x (max_nnz) * R <= 1024.
*/
__global__ void pti_MTTKRPKernelRankHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidx + bptr_begin;
if(z < bptr_end) {
/* TODO: duplicated in R threads */
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + tidy] * times_mat_2[tmp_i_2 * stride + tidy];
atomicAdd(&(mvals[mode_i * stride + tidy]), tmp_val);
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 03 Naive, 2-D, exchange tidx and tidy.
* Limitation: R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + tidx] * times_mat_2[tmp_i_2 * stride + tidx];
atomicAdd(&(mvals[mode_i * stride + tidx]), tmp_val);
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 04 Naive, 2-D, with rank blocking.
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiIndex r;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 14 Matrix Blocked, 2-D, with rank blocking.
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
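/* Rows are addressed relative to each block's base row, so only the short element indices (einds) are needed below */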
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiIndex r;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 15 Matrix Blocked, 2-D, with rank blocking.
* + switch according to block size
* use shared memory for the output matrix
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiIndex const sb_size = (ptiIndex) 1 << sb_bits;
/* Data in shared memory */
extern __shared__ ptiValue mempool[];
ptiValue * sm_blocked_mvals = mempool;
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiIndex r;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiIndex blocked_mode_i = dev_binds[mode][b] << sb_bits;
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Enough matrix reuse */
if (bptr_end - bptr_begin > sb_size) {
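/* Dense block: accumulate into a shared-memory copy of the output block (row pitch R), then flush it to global memory once */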
/* Load mats[nmodes] into shared memory, use R instead of stride. */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
}
}
__syncthreads();
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = sm_blocked_mvals + mode_i * R;
ptiValue * const blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
ptiValue * const blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
/* Make all shared-memory accumulations visible before the flush */
__syncthreads();
/* Store back mats[nmodes] from shared memory (row pitch R, not stride) */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
}
/* Do not let the next block iteration re-zero the buffer while it is still being read */
__syncthreads();
} else { /* Not enough matrix reuse */
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
} // End if: block size
} // End if: block range
} // End loop blocks
}
/* impl_num = 16 Matrix Blocked, 2-D, with rank blocking. TODO: BUG EXISTS.
* + switch according to block size
* use shared memory for three matrices
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiIndex const sb_size = (ptiIndex)powf(2, sb_bits);
/* Data in shared memory */
extern __shared__ ptiValue mempool[];
ptiValue * sm_blocked_mvals = mempool;
/* Pointer arithmetic on ptiValue* is in elements, not bytes */
ptiValue * sm_blocked_times_mat_1 = mempool + sb_size * R;
ptiValue * sm_blocked_times_mat_2 = sm_blocked_times_mat_1 + sb_size * R;
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiIndex r;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiIndex blocked_mode_i = dev_binds[mode][b] << sb_bits;
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Enough matrix reuse */
if (bptr_end - bptr_begin > sb_size) {
/* Load mats[nmodes] into shared memory, use R instead of stride. */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
sm_blocked_times_mat_1[tidy * R + r] = blocked_times_mat_1[tidy * stride + r];
sm_blocked_times_mat_2[tidy * R + r] = blocked_times_mat_2[tidy * stride + r];
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
sm_blocked_times_mat_1[tidy * R + r] = blocked_times_mat_1[tidy * stride + r];
sm_blocked_times_mat_2[tidy * R + r] = blocked_times_mat_2[tidy * stride + r];
}
}
__syncthreads();
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = sm_blocked_mvals + mode_i * R;
ptiValue * const blocked_times_mat_1_row = sm_blocked_times_mat_1 + tmp_i_1 * R;
ptiValue * const blocked_times_mat_2_row = sm_blocked_times_mat_2 + tmp_i_2 * R;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
/* Make all shared-memory accumulations visible before the flush */
__syncthreads();
/* Store back mats[nmodes] from shared memory */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
}
/* Do not let the next block iteration overwrite the buffers while they are still being read */
__syncthreads();
} else { /* Not enough matrix reuse */
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
} // End if: block size
} // End if: block range
} // End loop blocks
}
| 3ff979cecb9d03c220d091e8fc16c34a34003324.cu | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include "hicoo.h"
#include "mttkrp_cuda_kernels.h"
#include <inttypes.h>
int ptiMTTKRPKernelHiCOO(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiNnzIndex max_nnzb,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiIndex blength,
const int impl_num,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
int result = 0;
/* Maximum settings */
ptiIndex max_nthreads_per_block = 256;
ptiIndex max_nblocks = 32768;
ptiIndex max_R = 4;
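/* Rank-blocked kernels use at most max_R threads along the rank dimension; larger R is processed in chunks of blockDim.x inside the kernels */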
ptiIndex nthreadsx = 0;
ptiIndex nthreadsy = 0;
ptiIndex nblocks = 0;
ptiIndex shr_size = 0;
ptiNnzIndex all_nblocks = blength;
switch(nmodes) {
case 3: /* 3-D tensors */
switch(impl_num) {
case 1: // Naive, 1D
/* Set number of blocks and threads */
nthreadsx = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 2:
nthreadsy = R;
nthreadsx = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 3:
nthreadsx = R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
case 4:
nthreadsx = R;
if(R <= max_R)
nthreadsx = R;
else
nthreadsx = max_R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
/* Matrix blocked implementations */
case 14:
nthreadsx = R;
if(R <= max_R)
nthreadsx = R;
else
nthreadsx = max_R;
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
// shr_size = 2 * nmodes * sizeof(ptiIndex);
break;
/* Shared memory for the output matrix + switch block sizes */
case 15:
nthreadsx = R;
if(R <= max_R) {
nthreadsx = R;
}
else {
nthreadsx = max_R;
}
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
shr_size = (ptiIndex)pow(2, sb_bits) * R * sizeof(ptiValue);
break;
/* Shared memory for three matrices + switch block sizes */
case 16:
nthreadsx = R;
if(R <= max_R) {
nthreadsx = R;
}
else {
nthreadsx = max_R;
}
nthreadsy = max_nnzb;
if(all_nblocks < max_nblocks) {
nblocks = all_nblocks;
} else {
nblocks = max_nblocks;
}
shr_size = nmodes * (ptiIndex)pow(2, sb_bits) * R * sizeof(ptiValue);
break;
}
dim3 dimBlock(nthreadsx, nthreadsy);
switch(impl_num) {
case 1: // Naive, 1D
printf("\nExecute pti_MTTKRPKernelHiCOO_3D_naive (%u, %u)\n", nblocks, nthreadsx);
pti_MTTKRPKernelHiCOO_3D_naive<<<nblocks, nthreadsx>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 2:
printf("\nExecute pti_MTTKRPKernelRankHiCOO_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
pti_MTTKRPKernelRankHiCOO_3D_naive<<<nblocks, dimBlock>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 3:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOO_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
pti_MTTKRPKernelRankSplitHiCOO_3D_naive<<<nblocks, dimBlock>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
case 4:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_naive (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
pti_MTTKRPKernelRankSplitHiCOORB_3D_naive<<<nblocks, dimBlock>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Matrix blocked implementations */
case 14:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked (%u, %u, %u)\n", nblocks, nthreadsx, nthreadsy);
pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked<<<nblocks, dimBlock>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Shared memory for the output matrix + switch block sizes */
case 15:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM (%u, %u, %u), SM: %u bytes\n", nblocks, nthreadsx, nthreadsy, shr_size);
pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM<<<nblocks, dimBlock, shr_size>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
/* Shared memory for three matrices + switch block sizes */
case 16:
printf("\nExecute pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM (%u, %u, %u), SM: %u bytes\n", nblocks, nthreadsx, nthreadsy, shr_size);
pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM<<<nblocks, dimBlock, shr_size>>>(
mode,
nmodes,
nnz,
R,
stride,
sb_bits,
sc_bits,
blength,
kptr_begin,
kptr_end,
dev_ndims,
dev_cptr,
dev_bptr,
dev_binds,
dev_einds,
dev_values,
dev_mats_order,
dev_mats);
break;
}
break;
} // End switch nmodes
result = cudaThreadSynchronize();
pti_CheckCudaError(result != 0, "CUDA HiCOO SpTns MTTKRP");
return 0;
}
/* impl_num = 01 Naive, 1-D
* Limitation: blockDim.x (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
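/* A nonzero's global index in each mode = (block index << sb_bits) + its element index within the block */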
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidx + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
for(ptiIndex r=0; r<R; ++r) {
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 02 Naive, 2-D
* Limitation: blockDim.x (max_nnz) * R <= 1024.
*/
__global__ void pti_MTTKRPKernelRankHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidx + bptr_begin;
if(z < bptr_end) {
/* TODO: duplicated in R threads */
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + tidy] * times_mat_2[tmp_i_2 * stride + tidy];
atomicAdd(&(mvals[mode_i * stride + tidy]), tmp_val);
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 03 Naive, 2-D, exchange tidx and tidy.
* Limitation: R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOO_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiValue tmp_val = 0;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + tidx] * times_mat_2[tmp_i_2 * stride + tidx];
atomicAdd(&(mvals[mode_i * stride + tidx]), tmp_val);
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 04 Naive, 2-D, with rank blocking.
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_naive(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
ptiIndex block_coord_mode, block_coord_1, block_coord_2;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* Block indices */
block_coord_mode = dev_binds[mode][b] << sb_bits;
block_coord_1 = dev_binds[times_mat_index_1][b] << sb_bits;
block_coord_2 = dev_binds[times_mat_index_2][b] << sb_bits;
/* TODO: duplicated in registers */
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiNnzIndex const mode_i = block_coord_mode + dev_einds[mode][z];
ptiNnzIndex const tmp_i_1 = block_coord_1 + dev_einds[times_mat_index_1][z];
ptiNnzIndex const tmp_i_2 = block_coord_2 + dev_einds[times_mat_index_2][z];
ptiIndex r;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * times_mat_1[tmp_i_1 * stride + r] * times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(mvals[mode_i * stride + r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 14 Matrix Blocked, 2-D, with rank blocking.
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
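/* Rows are addressed relative to each block's base row, so only the short element indices (einds) are needed below */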
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiIndex r;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
}
} // End loop blocks
}
/* impl_num = 15 Matrix Blocked, 2-D, with rank blocking.
* + switch according to block size
* use shared memory for the output matrix
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_SM(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiIndex const sb_size = (ptiIndex) 1 << sb_bits;
/* Data in shared memory */
extern __shared__ ptiValue mempool[];
ptiValue * sm_blocked_mvals = mempool;
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiIndex r;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiIndex blocked_mode_i = dev_binds[mode][b] << sb_bits;
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Enough matrix reuse */
if (bptr_end - bptr_begin > sb_size) {
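/* Dense block: accumulate into a shared-memory copy of the output block (row pitch R), then flush it to global memory once */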
/* Load mats[nmodes] into shared memory, use R instead of stride. */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
}
}
__syncthreads();
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = sm_blocked_mvals + mode_i * R;
ptiValue * const blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
ptiValue * const blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
/* Make all shared-memory accumulations visible before the flush */
__syncthreads();
/* Store back mats[nmodes] from shared memory (row pitch R, not stride) */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
}
/* Do not let the next block iteration re-zero the buffer while it is still being read */
__syncthreads();
} else { /* Not enough matrix reuse */
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
} // End if: block size
} // End if: block range
} // End loop blocks
}
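/* The kernel above stages only mats[nmodes] in shared memory, so its launch must
 * provide at least sb_size * R * sizeof(ptiValue) bytes of dynamic shared memory
 * per block; the AllSM variant below also stages the two factor matrices and
 * therefore needs three such tiles. */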
/* impl_num = 16 Matrix Blocked, 2-D, with rank blocking. TODO: BUG EXISTS.
* + switch according to block size
* use shared memory for three matrices
* Limitation: max_R * blockDim.y (max_nnz) <= 1024.
*/
__global__ void pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM(
const ptiIndex mode,
const ptiIndex nmodes,
const ptiNnzIndex nnz,
const ptiIndex R,
const ptiIndex stride,
const ptiElementIndex sb_bits,
const ptiElementIndex sc_bits,
const ptiNnzIndex blength,
const ptiNnzIndex kptr_begin,
const ptiNnzIndex kptr_end,
ptiIndex * const dev_ndims,
ptiNnzIndex * const dev_cptr,
ptiNnzIndex * const dev_bptr,
ptiBlockIndex ** const dev_binds,
ptiElementIndex ** const dev_einds,
ptiValue * const dev_values,
ptiIndex * const dev_mats_order,
ptiValue ** const dev_mats)
{
ptiIndex const sb_size = (ptiIndex) 1 << sb_bits;
/* Data in shared memory */
extern __shared__ ptiValue mempool[];
ptiValue * sm_blocked_mvals = mempool;
ptiValue * sm_blocked_times_mat_1 = mempool + sb_size * R;
ptiValue * sm_blocked_times_mat_2 = sm_blocked_times_mat_1 + sb_size * R;
ptiNnzIndex const all_nblocks = blength;
const ptiIndex tidx = threadIdx.x;
const ptiIndex tidy = threadIdx.y;
ptiNnzIndex z;
const ptiIndex num_loops_r = R / blockDim.x;
const ptiIndex rest_loop = R - num_loops_r * blockDim.x;
ptiIndex r;
ptiValue * const mvals = dev_mats[nmodes];
ptiIndex const times_mat_index_1 = dev_mats_order[1];
ptiValue * const times_mat_1 = dev_mats[times_mat_index_1];
ptiIndex const times_mat_index_2 = dev_mats_order[2];
ptiValue * const times_mat_2 = dev_mats[times_mat_index_2];
ptiNnzIndex num_loops_blocks = 1;
if(all_nblocks > gridDim.x) {
num_loops_blocks = (all_nblocks + gridDim.x - 1) / gridDim.x;
}
for(ptiNnzIndex nb=0; nb<num_loops_blocks; ++nb) {
/* Block level */
ptiNnzIndex b = blockIdx.x + nb * gridDim.x;
if(b < blength) {
/* TODO: duplicated in registers */
ptiIndex blocked_mode_i = dev_binds[mode][b] << sb_bits;
ptiValue * blocked_mvals = mvals + (dev_binds[mode][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_1 = times_mat_1 + (dev_binds[times_mat_index_1][b] << sb_bits) * stride;
ptiValue * blocked_times_mat_2 = times_mat_2 + (dev_binds[times_mat_index_2][b] << sb_bits) * stride;
ptiNnzIndex const bptr_begin = dev_bptr[b];
ptiNnzIndex const bptr_end = dev_bptr[b+1];
/* Enough matrix reuse */
if (bptr_end - bptr_begin > sb_size) {
/* Load mats[nmodes] into shared memory, use R instead of stride. */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
sm_blocked_times_mat_1[tidy * R + r] = blocked_times_mat_1[tidy * stride + r];
sm_blocked_times_mat_2[tidy * R + r] = blocked_times_mat_2[tidy * stride + r];
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
sm_blocked_mvals[tidy * R + r] = 0;
sm_blocked_times_mat_1[tidy * R + r] = blocked_times_mat_1[tidy * stride + r];
sm_blocked_times_mat_2[tidy * R + r] = blocked_times_mat_2[tidy * stride + r];
}
}
__syncthreads();
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = sm_blocked_mvals + mode_i * R;
ptiValue * const blocked_times_mat_1_row = sm_blocked_times_mat_1 + tmp_i_1 * R;
ptiValue * const blocked_times_mat_2_row = sm_blocked_times_mat_2 + tmp_i_2 * R;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1_row[r] * blocked_times_mat_2_row[r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
/* Store back mats[nmodes] from shared memory */
if (tidy < sb_size) {
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
atomicAdd( &(blocked_mvals[tidy * stride + r]), sm_blocked_mvals[tidy * R + r] );
}
}
} else { /* Not enough matrix reuse */
/* Thread level */
z = tidy + bptr_begin;
if(z < bptr_end) {
ptiValue const entry = dev_values[z];
ptiElementIndex const mode_i = dev_einds[mode][z];
ptiElementIndex const tmp_i_1 = dev_einds[times_mat_index_1][z];
ptiElementIndex const tmp_i_2 = dev_einds[times_mat_index_2][z];
ptiValue * const bmvals_row = blocked_mvals + mode_i * stride;
ptiValue tmp_val = 0;
for(ptiIndex l=0; l<num_loops_r; ++l) {
r = tidx + l * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
if(rest_loop > 0 && tidx < rest_loop) {
r = tidx + num_loops_r * blockDim.x;
tmp_val = entry * blocked_times_mat_1[tmp_i_1 * stride + r] * blocked_times_mat_2[tmp_i_2 * stride + r];
atomicAdd(&(bmvals_row[r]), tmp_val);
}
} // End loop entries
} // End if: block size
} // End if: block range
} // End loop blocks
}
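/* Hedged launch sketch for the AllSM variant above (grid/block names and host-side
 * variables are assumptions, not taken from this file): threadIdx.x runs over the
 * rank R, threadIdx.y runs over nonzeros, and the dynamic shared memory must hold
 * three sb_size x R tiles of ptiValue. */
// size_t sb_size = (size_t)1 << sb_bits;                      // assumed host-side mirror of the kernel's sb_size
// size_t shmem = 3 * sb_size * R * sizeof(ptiValue);          // mvals tile + two factor-matrix tiles
// dim3 block(rank_threads, nnz_threads);                      // hypothetical tuning parameters
// pti_MTTKRPKernelRankSplitHiCOORB_3D_MatrixBlocked_AllSM<<<nblocks, block, shmem>>>(
//     mode, nmodes, nnz, R, stride, sb_bits, sc_bits, blength, kptr_begin, kptr_end,
//     dev_ndims, dev_cptr, dev_bptr, dev_binds, dev_einds, dev_values, dev_mats_order, dev_mats);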
|
af677d4ded97a1295cf1f953479ba32ad176125a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) \
FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_argmin.cu", \
line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
__global__ void
awkward_reduce_argmin_bool_64_kernel(int64_t* toptr,
const bool* fromptr,
const int64_t* parents,
int64_t lenparents) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < lenparents) {
int64_t parent = parents[thread_id];
if (toptr[parent] == -1 ||
(fromptr[thread_id] != 0) < (fromptr[toptr[parent]] != 0)) {
toptr[parent] = thread_id;
}
}
}
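/* One thread handles one (value, parent) pair: toptr[parent] starts at -1 (set by the
 * wrapper below) and is overwritten with this thread's index whenever its boolean value,
 * read as 0/1, is strictly smaller than the value at the currently recorded index. */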
ERROR
awkward_reduce_argmin_bool_64(int64_t* toptr,
const bool* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
HANDLE_ERROR(hipMemset(toptr, -1, sizeof(int64_t) * outlength));
dim3 blocks_per_grid = blocks(lenparents);
dim3 threads_per_block = threads(lenparents);
hipLaunchKernelGGL(( awkward_reduce_argmin_bool_64_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
toptr, fromptr, parents, lenparents);
return success();
}
| af677d4ded97a1295cf1f953479ba32ad176125a.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) \
FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_argmin.cu", \
line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
__global__ void
awkward_reduce_argmin_bool_64_kernel(int64_t* toptr,
const bool* fromptr,
const int64_t* parents,
int64_t lenparents) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < lenparents) {
int64_t parent = parents[thread_id];
if (toptr[parent] == -1 ||
(fromptr[thread_id] != 0) < (fromptr[toptr[parent]] != 0)) {
toptr[parent] = thread_id;
}
}
}
ERROR
awkward_reduce_argmin_bool_64(int64_t* toptr,
const bool* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
HANDLE_ERROR(cudaMemset(toptr, -1, sizeof(int64_t) * outlength));
dim3 blocks_per_grid = blocks(lenparents);
dim3 threads_per_block = threads(lenparents);
awkward_reduce_argmin_bool_64_kernel<<<blocks_per_grid, threads_per_block>>>(
toptr, fromptr, parents, lenparents);
return success();
}
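/* Hedged usage sketch (buffer names are assumptions, not part of this file): all three
 * arrays live on the device, and toptr is reset to -1 inside the wrapper itself. */
// int64_t* d_toptr; bool* d_fromptr; int64_t* d_parents;   // assumed pre-allocated device buffers
// ERROR err = awkward_reduce_argmin_bool_64(d_toptr, d_fromptr, d_parents, lenparents, outlength);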
|
86dfabe342eeaf7aec84941206c2fbded598e2d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Janki Bhimani
Northeastern University
Email: [email protected]
*/
#include <stdio.h>
#include <assert.h>
#include <math.h>
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
__global__ void kernel(float *a, int offset, int x)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int p;
int t = offset + ((i*(x))+j);
{
float q = (float)t;
float s = sinf(q);
float c = cosf(q);
a[t] = a[t] + sqrtf(s*s+c*c); //adding 1 to a
for(p=0;p<200;p++)
{
q = sinf(q);
q = cosf(q);
q = sqrtf(s*s+c*c);
}
}
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
int main(int argc, char **argv)
{
const int blockSize = 1024, nStreams = sqrt(atoi(argv[2]));
int x = atoi(argv[1]);
const int n = x *x * blockSize ;
const int streamSize = n / nStreams/ nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
int devId = 0;
if (argc > 3) devId = atoi(argv[3]);
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId) );
dim3 block(32, 32);
dim3 grid((sqrt(n))/32,(sqrt(n))/32);
dim3 grid1((sqrt(n))/nStreams/32, (sqrt(n))/nStreams/32);
x= x* nStreams;
float ms, msk, seq, aloc; // elapsed time in milliseconds
// create events and streams
hipEvent_t startaloc, stopaloc, startEvent, stopEvent, startKernel, stopKernel, dummyEvent;
hipStream_t stream[nStreams * nStreams];
checkCuda( hipEventCreate(&startaloc) );
checkCuda( hipEventCreate(&stopaloc) );
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventCreate(&startKernel) );
checkCuda( hipEventCreate(&stopKernel) );
checkCuda( hipEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams* nStreams; ++i)
checkCuda( hipStreamCreate(&stream[i]) );
checkCuda( hipEventRecord(startaloc,0) );
float *a = (float*)malloc(bytes) ;
checkCuda( hipMallocManaged((void**)&a, bytes) ); // device
checkCuda( hipEventRecord(stopaloc, 0) );
checkCuda( hipEventSynchronize(stopaloc) );
checkCuda( hipEventElapsedTime(&aloc, startaloc, stopaloc) );
// baseline case - sequential transfer and execute
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
checkCuda( hipEventRecord(startKernel,0) );
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block), 0, 0, a, 0, sqrt(n));
checkCuda(hipDeviceSynchronize());
checkCuda( hipEventRecord(stopKernel, 0) );
checkCuda( hipEventSynchronize(stopKernel) );
checkCuda( hipEventElapsedTime(&msk, startKernel, stopKernel) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
seq = ms;
printf("Time for seq transfer and execute (ms): %f\n", ms+aloc);
printf("Time for kernel execute (ms): %f\n", msk);
printf("Bytes for sequential transfer (bytes): %d\n", bytes);
printf(" max error: %e\n", maxError(a, n));
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams* nStreams; ++i) {
int offset = i * streamSize;
hipLaunchKernelGGL(( kernel), dim3(grid1), dim3(block), 0, stream[i], a, offset, sqrt(n)/nStreams);
checkCuda(hipDeviceSynchronize());
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Scheduling scheme type I transfer and execute (ms): %f\n", ms+aloc);
printf(" max error: %e\n", maxError(a, n));
memset(a, 0, bytes);
checkCuda( hipEventRecord(startEvent,0) );
for (int i = 0; i < nStreams* nStreams; ++i)
{
int offset = i * streamSize;
hipLaunchKernelGGL(( kernel), dim3(grid1), dim3(block), 0, stream[i], a, offset, sqrt(n)/nStreams);
checkCuda(hipDeviceSynchronize());
}
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Scheduling scheme type II transfer and execute (ms): %f\n", ms+aloc);
printf(" max error: %e\n", maxError(a, n));
printf("% Overlap (%): %f\n", (seq-ms)/seq*100);
// cleanup
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
checkCuda( hipEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams* nStreams; ++i)
checkCuda( hipStreamDestroy(stream[i]) );
hipFree(a);
//hipFree(a);
return 0;
}
| 86dfabe342eeaf7aec84941206c2fbded598e2d8.cu | /*
Author: Janki Bhimani
Northeastern University
Email: [email protected]
*/
#include <stdio.h>
#include <assert.h>
#include <math.h>
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__ void kernel(float *a, int offset, int x)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int p;
int t = offset + ((i*(x))+j);
{
float q = (float)t;
float s = sinf(q);
float c = cosf(q);
a[t] = a[t] + sqrtf(s*s+c*c); //adding 1 to a
for(p=0;p<200;p++)
{
q = sinf(q);
q = cosf(q);
q = sqrtf(s*s+c*c);
}
}
}
float maxError(float *a, int n)
{
float maxE = 0;
for (int i = 0; i < n; i++) {
float error = fabs(a[i]-1.0f);
if (error > maxE) maxE = error;
}
return maxE;
}
int main(int argc, char **argv)
{
const int blockSize = 1024, nStreams = sqrt(atoi(argv[2]));
int x = atoi(argv[1]);
const int n = x *x * blockSize ;
const int streamSize = n / nStreams/ nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
int devId = 0;
if (argc > 3) devId = atoi(argv[3]);
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( cudaSetDevice(devId) );
dim3 block(32, 32);
dim3 grid((sqrt(n))/32,(sqrt(n))/32);
dim3 grid1((sqrt(n))/nStreams/32, (sqrt(n))/nStreams/32);
x= x* nStreams;
float ms, msk, seq, aloc; // elapsed time in milliseconds
// create events and streams
cudaEvent_t startaloc, stopaloc, startEvent, stopEvent, startKernel, stopKernel, dummyEvent;
cudaStream_t stream[nStreams * nStreams];
checkCuda( cudaEventCreate(&startaloc) );
checkCuda( cudaEventCreate(&stopaloc) );
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventCreate(&startKernel) );
checkCuda( cudaEventCreate(&stopKernel) );
checkCuda( cudaEventCreate(&dummyEvent) );
for (int i = 0; i < nStreams* nStreams; ++i)
checkCuda( cudaStreamCreate(&stream[i]) );
checkCuda( cudaEventRecord(startaloc,0) );
float *a = (float*)malloc(bytes) ;
checkCuda( cudaMallocManaged((void**)&a, bytes) ); // device
checkCuda( cudaEventRecord(stopaloc, 0) );
checkCuda( cudaEventSynchronize(stopaloc) );
checkCuda( cudaEventElapsedTime(&aloc, startaloc, stopaloc) );
// baseline case - sequential transfer and execute
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
checkCuda( cudaEventRecord(startKernel,0) );
kernel<<<grid,block>>>(a, 0, sqrt(n));
checkCuda(cudaDeviceSynchronize());
checkCuda( cudaEventRecord(stopKernel, 0) );
checkCuda( cudaEventSynchronize(stopKernel) );
checkCuda( cudaEventElapsedTime(&msk, startKernel, stopKernel) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
seq = ms;
printf("Time for seq transfer and execute (ms): %f\n", ms+aloc);
printf("Time for kernel execute (ms): %f\n", msk);
printf("Bytes for sequential transfer (bytes): %d\n", bytes);
printf(" max error: %e\n", maxError(a, n));
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams* nStreams; ++i) {
int offset = i * streamSize;
kernel<<<grid1, block, 0, stream[i]>>>(a, offset, sqrt(n)/nStreams);
checkCuda(cudaDeviceSynchronize());
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Scheduling scheme type I transfer and execute (ms): %f\n", ms+aloc);
printf(" max error: %e\n", maxError(a, n));
memset(a, 0, bytes);
checkCuda( cudaEventRecord(startEvent,0) );
for (int i = 0; i < nStreams* nStreams; ++i)
{
int offset = i * streamSize;
kernel<<<grid1, block, 0, stream[i]>>>(a, offset, sqrt(n)/nStreams);
checkCuda(cudaDeviceSynchronize());
}
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("Scheduling scheme type II transfer and execute (ms): %f\n", ms+aloc);
printf(" max error: %e\n", maxError(a, n));
printf("% Overlap (%): %f\n", (seq-ms)/seq*100);
// cleanup
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
checkCuda( cudaEventDestroy(dummyEvent) );
for (int i = 0; i < nStreams* nStreams; ++i)
checkCuda( cudaStreamDestroy(stream[i]) );
cudaFree(a);
//cudaFree(a);
return 0;
}
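/* Hedged run sketch (file and binary names are assumptions): argv[1] scales the problem
 * size (n = argv[1]^2 * 1024) and argv[2] is the total stream count, of which the square
 * root is taken, so it should be a perfect square; argv[3] optionally selects the device.
 *   nvcc -O2 stream_overlap.cu -o stream_overlap
 *   ./stream_overlap 8 16 0
 */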
|
83e58d2bf0e37012bb56a9f2bc578559d1f3b960.hip | // !!! This is a file automatically generated by hipify!!!
#include "main-pr.hpp"
#define THROW_AWAY 0
#include "Padded2DArray.hpp"
#include <omp.h>
#include "memutils.hpp"
#include <cmath>
//#define SHOWLOADBALANCE
#include "logged_array.hpp"
//#define LOG
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cusparse_v2.h>
#include <rocblas.h>
#include "helper_cuda.h"
#include "math.h"
#include "tbb/concurrent_queue.h"
#include "AdaptativeUtils.hpp"
template <typename VertexType, typename EdgeType, typename Scalar>
int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_,
Scalar lambda,
int nTry, //algo parameter
util::timestamp& totaltime, std::string& out
)
{
int nb_blocks = 0;
int blk_size = 0;
int nb_threads = 0;
{
char* str = getenv ("NBTHREAD");
if (str) {
std::stringstream ss (str);
ss>>nb_threads;
if (!ss)
std::cerr<<"NBTHREAD invalid"<<std::endl;
}
}
{
char* str = getenv ("NBBLOCK");
if (str) {
std::stringstream ss (str);
ss>>nb_blocks;
if (!ss)
std::cerr<<"NBBLOCK invalid"<<std::endl;
}
}
{
char* str = getenv ("BLKSIZE");
if (str) {
std::stringstream ss (str);
ss>>blk_size;
if (!ss)
std::cerr<<"SUBSIZE invalid"<<std::endl;
}
}
if(nb_threads == 0 ){
std::cerr<<" NBTHREAD=??? "<<std::endl;
exit(0);
}
if(blk_size == 0 ){
std::cerr<<" BLKSIZE=??? "<<std::endl;
exit(0);
}
if(nb_blocks == 0 ){
std::cerr<<" NBBLOCK=??? "<<std::endl;
exit(0);
}
bool coldcache = true;
util::timestamp start(0,0);
//cpuside variables
Scalar* prin_ = new Scalar[nVtx];
EdgeType* xadj = xadj_;
VertexType *adj = adj_;
Scalar* val = val_;
Scalar* prior = prior_;
Scalar* prin = prin_;
Scalar* prout = pr_;
Scalar alpha = lambda;
Scalar beta = 1-lambda;
Scalar alpha1 = lambda;
Scalar beta1 = 1-lambda;
Scalar epsalpha = -1;
Scalar *h_eps0;
Scalar *h_eps1;
//cuda side variable
EdgeType* d_xadj0 ;
VertexType *d_adj0 ;
Scalar* d_val0 ;
Scalar* d_prior0 ;
Scalar* d_prin0 ;
Scalar* d_prout0 ;
Scalar *d_alpha0;
Scalar *d_beta0;
Scalar *d_epsalpha0;
Scalar *d_eps0;
EdgeType* d_xadj1 ;
VertexType *d_adj1 ;
Scalar* d_val1 ;
Scalar* d_prior1 ;
Scalar* d_prin1 ;
Scalar* d_prout1 ;
Scalar *d_alpha1;
Scalar *d_beta1;
Scalar *d_epsalpha1;
Scalar *d_eps1;
/* Get handle to the CUBLAS context */
hipSetDevice(0);
hipblasHandle_t cublasHandle0 = 0;
hipblasStatus_t cublasStatus0;
cublasStatus0 = hipblasCreate(&cublasHandle0);
hipblasSetPointerMode(cublasHandle0, HIPBLAS_POINTER_MODE_DEVICE);
checkCudaErrors( hipSetDevice(1));
hipblasHandle_t cublasHandle1 = 0;
hipblasStatus_t cublasStatus1;
cublasStatus1 = hipblasCreate(&cublasHandle1);
hipblasSetPointerMode(cublasHandle1, HIPBLAS_POINTER_MODE_DEVICE);
/* Get handle to the CUSPARSE context */
hipSetDevice(0);
hipsparseHandle_t cusparseHandle0 = 0;
hipsparseStatus_t cusparseStatus0;
cusparseStatus0 = hipsparseCreate(&cusparseHandle0);
hipsparseMatDescr_t descr0 = 0;
cusparseStatus0 = hipsparseCreateMatDescr(&descr0);
hipsparseSetMatType(descr0,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr0,HIPSPARSE_INDEX_BASE_ZERO);
hipSetDevice(1);
hipsparseHandle_t cusparseHandle1 = 0;
hipsparseStatus_t cusparseStatus1;
cusparseStatus1 = hipsparseCreate(&cusparseHandle1);
hipsparseMatDescr_t descr1 = 0;
cusparseStatus1 = hipsparseCreateMatDescr(&descr1);
hipsparseSetMatType(descr1,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr1,HIPSPARSE_INDEX_BASE_ZERO);
//cuda stream
hipSetDevice(0);
hipStream_t stream0;
hipStreamCreate(&stream0);
hipSetDevice(1);
hipStream_t stream1;
hipStreamCreate(&stream1);
//memalloc
hipSetDevice(0);
checkCudaErrors( hipMalloc((void**)&d_xadj0, (nVtx+1)*sizeof(*xadj)) );
checkCudaErrors( hipMalloc((void**)&d_adj0, (xadj[nVtx])*sizeof(*adj)) );
checkCudaErrors( hipMalloc((void**)&d_val0, (xadj[nVtx])*sizeof(*val)) );
checkCudaErrors( hipMalloc((void**)&d_prior0, (nVtx*sizeof(*prior))));
checkCudaErrors( hipMalloc((void**)&d_prin0, (nVtx*sizeof(*prin)) ));
checkCudaErrors( hipMalloc((void**)&d_prout0, (nVtx*sizeof(*prout)) ));
checkCudaErrors( hipMalloc((void**)&d_epsalpha0, (sizeof(epsalpha)) ));
checkCudaErrors( hipHostMalloc((void**)&h_eps0, (sizeof(*h_eps0)) ));
checkCudaErrors( hipMalloc((void**)&d_eps0, (sizeof(*h_eps0)) ));
hipSetDevice(1);
checkCudaErrors( hipMalloc((void**)&d_xadj1, (nVtx+1)*sizeof(*xadj)) );
checkCudaErrors( hipMalloc((void**)&d_adj1, (xadj[nVtx])*sizeof(*adj)) );
checkCudaErrors( hipMalloc((void**)&d_val1, (xadj[nVtx])*sizeof(*val)) );
checkCudaErrors( hipMalloc((void**)&d_prior1, (nVtx*sizeof(*prior))));
checkCudaErrors( hipMalloc((void**)&d_prin1, (nVtx*sizeof(*prin)) ));
checkCudaErrors( hipMalloc((void**)&d_prout1, (nVtx*sizeof(*prout)) ));
checkCudaErrors( hipMalloc((void**)&d_epsalpha1, (sizeof(epsalpha)) ));
checkCudaErrors( hipHostMalloc((void**)&h_eps1, (sizeof(*h_eps1)) ));
checkCudaErrors( hipMalloc((void**)&d_eps1, (sizeof(*h_eps1)) ));
//cpu to gpu copies
hipSetDevice(0);
checkCudaErrors( hipMemcpy(d_xadj0, xadj, (nVtx+1)*sizeof(*xadj), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_adj0, adj, (xadj[nVtx])*sizeof(*adj), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_val0, val, (xadj[nVtx])*sizeof(*val), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_prior0, prior, nVtx*sizeof(*prior), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_epsalpha0, &epsalpha, sizeof(epsalpha), hipMemcpyHostToDevice) );
hipSetDevice(1);
checkCudaErrors( hipMemcpy(d_xadj1, xadj, (nVtx+1)*sizeof(*xadj), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_adj1, adj, (xadj[nVtx])*sizeof(*adj), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_val1, val, (xadj[nVtx])*sizeof(*val), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_prior1, prior, nVtx*sizeof(*prior), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_epsalpha1, &epsalpha, sizeof(epsalpha), hipMemcpyHostToDevice) );
int nRows = nVtx;
unsigned long* rowBlocks;
const int nThreadPerBlock = nb_threads;
const unsigned int blkSize = blk_size;
const unsigned int blkMultiplier = 3;
const unsigned int rows_for_vector = 2;
const bool allocate_row_blocks = true;
//device 0 variable
unsigned long* d_rowBlocks0;
unsigned int* d_blkSize0;
unsigned int* d_rows_for_vector0;
unsigned int* d_blkMultiplier0;
float* d_a0;
float* d_b0;
//device 1 variable
unsigned long* d_rowBlocks1;
unsigned int* d_blkSize1;
unsigned int* d_rows_for_vector1;
unsigned int* d_blkMultiplier1;
float* d_a1;
float* d_b1;
//added for test
int *test_GPU0;
int *test_GPU1;
int rowBlockSize1;
int rowBlockSize2;
//compute rowBlockSize
rowBlockSize1 = ComputeRowBlocksSize<int,int>(xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock);
//cout << "rowBlockSize1 : " << rowBlockSize1 << endl;
//declare rowBlocks
rowBlocks = (unsigned long*) calloc(sizeof(unsigned long),rowBlockSize1);
//compute rowBlocks
ComputeRowBlocks<int,int>( rowBlocks, rowBlockSize2, xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock, allocate_row_blocks);
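//each rowBlocks entry packs the first row of its block in the upper 32 bits; the
//expressions of the form ((rowBlocks[i] >> (64-32)) & ((1UL << 32) - 1UL)) below
//recover that row index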
//cout << "rowBlockSize2 : " << rowBlockSize2 <<endl;
int end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
// cout << " end : " << end <<endl;
// if(end == 0){
// rowBlockSize1--;
// }
int mediumRowblocks = cutRowBlocks(rowBlocks, rowBlockSize1);
int part2 = rowBlockSize1 - mediumRowblocks;
// int medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL));
// end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
// cout << "rowBlockSize1 : " << rowBlockSize1 << endl;
// cout << "mediumRowBlocks :" << mediumRowblocks << endl;
// cout << " - medium : " << medium <<endl;
// cout << " - part2 = " << part2 << endl;
// cout << " - end : -- > " << end << endl;
int tab0[] = {1, 2, 3};
int tab1[] = {4, 5, 6};
hipSetDevice(0);
//add for test
checkCudaErrors(hipMalloc((void**)&test_GPU0, 3*sizeof(int)));
checkCudaErrors(hipMemcpy(test_GPU0, tab0, 3*sizeof(int), hipMemcpyHostToDevice));
hipSetDevice(1);
checkCudaErrors(hipMalloc((void**)&test_GPU1, 3*sizeof(int)));
checkCudaErrors( hipMemcpy(test_GPU1, tab1, 3*sizeof(int), hipMemcpyHostToDevice));
hipSetDevice(0);
//malloc for device 0 variable
checkCudaErrors( hipMalloc((void**)&d_rowBlocks0, (rowBlockSize1*sizeof(unsigned long))));
checkCudaErrors( hipMalloc((void**)&d_blkSize0, 1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_rows_for_vector0,1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_blkMultiplier0, 1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_a0, 1*sizeof(float)));
checkCudaErrors( hipMalloc((void**)&d_b0, 1*sizeof(float)));
//malloc for device 1 variable
hipSetDevice(1);
checkCudaErrors( hipMalloc((void**)&d_rowBlocks1, (rowBlockSize1*sizeof(unsigned long))));
checkCudaErrors( hipMalloc((void**)&d_blkSize1, 1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_rows_for_vector1,1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_blkMultiplier1, 1*sizeof(unsigned int)));
checkCudaErrors( hipMalloc((void**)&d_a1, 1*sizeof(float)));
checkCudaErrors( hipMalloc((void**)&d_b1, 1*sizeof(float)));
//send data to device 0
hipSetDevice(0);
checkCudaErrors( hipMemcpy(d_rowBlocks0, rowBlocks, rowBlockSize1*sizeof(unsigned long), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_blkSize0, &blkSize, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_rows_for_vector0, &rows_for_vector, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_blkMultiplier0, &blkMultiplier, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_a0, &alpha, 1*sizeof(Scalar), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_b0, &beta, 1*sizeof(Scalar), hipMemcpyHostToDevice) );
//send data to device 1
hipSetDevice(1);
checkCudaErrors( hipMemcpy(d_rowBlocks1, rowBlocks, rowBlockSize1*sizeof(unsigned long), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_blkSize1, &blkSize, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_rows_for_vector1, &rows_for_vector, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_blkMultiplier1, &blkMultiplier, 1*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_a1, &alpha, 1*sizeof(Scalar), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_b1, &beta, 1*sizeof(Scalar), hipMemcpyHostToDevice) );
// prepare stream config
list<Task> *tasks = new list<Task>;
tbb::concurrent_bounded_queue<stream_container<int,int,float>* >* streams = new tbb::concurrent_bounded_queue<stream_container<int,int,float>* >;
int mmshared_size = (blkSize + 1) * sizeof(float);
// int nb_blocks = 512;
int stream_number = 2;
int X, subsize;
X = (int) rowBlockSize1/(nb_blocks) ;
if(X % 64 == 0){
subsize = X;
}else{
X = X / 64 ;
subsize = (X+1) * 64;
}
int xadjPtr1 = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
cout << "rowBlockSize : "<< rowBlockSize1 << "last row " << xadjPtr1 << endl;
cout << "subsize : "<< subsize << endl;
cout << "start creat stream " <<endl;
creat_stream_2gpus<int, int, float>(d_rowBlocks0, d_a0, d_b0, d_val0, d_xadj0, d_adj0, d_prin0, d_prout0, d_blkSize0, d_rows_for_vector0, d_blkMultiplier0, d_rowBlocks1, d_a1, d_b1, d_val1, d_xadj1, d_adj1, d_prin1, d_prout1, d_blkSize1, d_rows_for_vector1, d_blkMultiplier1, streams, stream_number );
cout << "end creat stream " <<endl;
cout << "start split task " <<endl;
int nb_tasks = split_input_to_tasks(rowBlocks, rowBlockSize1, subsize, *tasks);
cout << "fin split task " <<endl;
cout << "nb_tasks " << nb_tasks << endl;
//add for test
int *gpu0;
int *gpu1;
int size = (blkSize) * sizeof(float);
hipSetDevice(1);
gpu1 = (int*) calloc(sizeof(int), 3);
hipSetDevice(0);
gpu0 = (int*) calloc(sizeof(int), 3);
int medium;
for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY)
{
if (TRY >= THROW_AWAY)
start = util::timestamp();
int maxiter = 40;
medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL));
//for GPU0
hipSetDevice(0);
//setup prin
//hipMemcpyAsync(d_prin0, d_prior0, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice,stream0);
hipMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice,stream0);
hipSetDevice(1);
//setup prin
hipMemcpyAsync(d_prin1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), hipMemcpyDeviceToDevice,stream1);
hipSetDevice(1);
checkCudaErrors( hipStreamSynchronize(stream1));
hipSetDevice(0);
checkCudaErrors( hipStreamSynchronize(stream0));
for (int iter = 0; iter < maxiter ; ++ iter) {
int top = 0;
int bottom = nb_tasks;
hipSetDevice(1);
hipMemcpyAsync(d_prin1, d_prin0, (medium)*sizeof(*d_prin0), hipMemcpyDeviceToDevice, stream1);
hipSetDevice(0);
hipMemcpyAsync(d_prin0+medium, d_prin1+medium, (nVtx-medium)*sizeof(*d_prin0), hipMemcpyDeviceToDevice, stream0);
hipSetDevice(0);
hipMemcpyAsync(d_prout0, d_prior0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream0);
hipSetDevice(1);
hipMemcpyAsync(d_prout1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream1);
hipSetDevice(0);
hipDeviceSynchronize();
hipSetDevice(1);
hipDeviceSynchronize();
int iteration = 0;
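/* Dynamic two-GPU split: idle streams are popped from the shared queue and handed tasks,
with GPU 0 streams consuming row-block tasks from the top of the list and GPU 1 streams
from the bottom; `medium` tracks the first row owned by GPU 1 so the two partial
PageRank vectors can be exchanged at the start of the next iteration. */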
while(top < bottom){
iteration++;
//std::cout << " while : "<< std::endl;
stream_container<int, int, float> *current_stream;
streams->pop(current_stream);
if(current_stream->device == 0){
// std::cout << "0 top++ : " << top <<std::endl;
Task t = get_task(tasks, top++);
put_work_on_stream<int,int,float>(current_stream,t);
}else{
// std::cout << "1 bottom-- " << bottom << std::endl;
Task t = get_task(tasks, --bottom);
put_work_on_stream<int,int,float>(current_stream,t);
}
hipSetDevice(current_stream->device);
cudaPrintError("befor kernel");
hipLaunchKernelGGL(( csr_adaptative), dim3((current_stream->rowBlockSize + 1 )) , dim3(nThreadPerBlock), mmshared_size, current_stream->stream , current_stream->d_val, current_stream->d_adj, current_stream->d_xadj, current_stream->d_prin, current_stream->d_prout, (current_stream->d_rowBlocks + current_stream->rowBlocksPtr ), current_stream->alpha, current_stream->beta, current_stream->d_blkSize, current_stream->d_blkMultiplier, current_stream->d_rows_for_vector, current_stream->rowBlockSize);
cudaPrintError("after kernel");
hipStreamAddCallback(current_stream->stream, call_back , current_stream , 0);
if(current_stream->device == 1)
medium = ((rowBlocks[current_stream->rowBlocksPtr] >> (64-32)) & ((1UL << 32) - 1UL));;
cudaPrintError("after callbacj");
}
hipSetDevice(0);
hipDeviceSynchronize();
hipSetDevice(1);
hipDeviceSynchronize();
//compute epsilon
//using prin to compute epsilon
hipSetDevice(0);
hipblasSetStream(cublasHandle0, stream0);
hipblasSaxpy (cublasHandle0, medium, d_epsalpha0, d_prout0, 1, d_prin0, 1); // d_prin = d_prout*-1 + d_prin
hipblasSasum (cublasHandle0, medium, d_prin0, 1, d_eps0);
hipMemcpyAsync(h_eps0, d_eps0, sizeof(*d_eps0), hipMemcpyDeviceToHost, stream0);
// hipMemcpyAsync(d_prin0, d_prout0, nVtx*sizeof(*prout), hipMemcpyDeviceToDevice, stream0);//prepare prin for next iteration
//compute epsilon
//using prin to compute epsilon
hipSetDevice(1);
hipblasSetStream(cublasHandle1, stream1);
hipblasSaxpy (cublasHandle1, (nVtx-medium), d_epsalpha1, d_prout1+medium, 1, d_prin1+medium, 1); // d_prin = d_prout*-1 + d_prin
hipblasSasum(cublasHandle1, nVtx-medium, d_prin1+medium, 1, d_eps1);
hipMemcpyAsync(h_eps1, d_eps1, sizeof(*h_eps1), hipMemcpyDeviceToHost, stream1);
hipSetDevice(1);
hipMemcpyAsync(d_prin1+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), hipMemcpyDeviceToDevice,stream1);//prepare prin for next iteration
// checkCudaErrors(hipMemcpy(gpu1, test_GPU1, 3*sizeof(int), hipMemcpyDeviceToHost));
hipSetDevice(0);
hipMemcpyAsync(d_prin0, d_prout0, (medium)*sizeof(*prout), hipMemcpyDeviceToDevice, stream0);//prepare prin for next iteration
// checkCudaErrors(hipMemcpy(gpu0, test_GPU0, 3*sizeof(int), hipMemcpyDeviceToHost));
hipSetDevice(1);
checkCudaErrors( hipStreamSynchronize(stream1));
hipSetDevice(0);
checkCudaErrors( hipStreamSynchronize(stream0));
//stopping condition
std::cerr<<*h_eps0<< " + " << *h_eps1<< " = " << *h_eps0+ *h_eps1 << std::endl;
//if (*h_eps0 +*h_eps1 < 0) // deactivated for testing purposes
// iter = maxiter;
// std::cerr << " GPU0=("<<gpu0[0]<< ", "<< gpu0[1]<< ", "<< gpu0[2]<<") ";
// std::cerr << " GPU1=("<<gpu1[0]<< ", "<< gpu1[1]<< ", "<< gpu1[2] << ")" << std::endl;
}
hipSetDevice(0);
checkCudaErrors(hipMemcpy(prout, d_prout0, nVtx*sizeof(*prout), hipMemcpyDeviceToHost));
std::cerr<<" PR="<<prout[0]<< " " << prout[1] << " " << prout[2] << std::endl;
std::cerr<<endl;
if (TRY >= THROW_AWAY)
{
util::timestamp stop;
totaltime += stop - start;
}
}
hipSetDevice(0);
hipDeviceReset();
hipSetDevice(1);
hipDeviceReset();
delete[] prin_;
{
std::stringstream ss;
ss<<"part1V: "<< medium <<" part1E: "<<xadj[medium+1]
<<" part2V: "<<nVtx-(medium)<<" part2E: "<< xadj[nVtx] - xadj[medium+1];
out = ss.str();
}
return 0;
}
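/* Hedged run sketch (binary name and graph arguments are assumptions): the launch
 * geometry is read from the environment rather than argv, e.g.
 *   NBTHREAD=128 NBBLOCK=512 BLKSIZE=1024 ./main-pr <graph file>
 */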
| 83e58d2bf0e37012bb56a9f2bc578559d1f3b960.cu | #include "main-pr.hpp"
#define THROW_AWAY 0
#include "Padded2DArray.hpp"
#include <omp.h>
#include "memutils.hpp"
#include <cmath>
//#define SHOWLOADBALANCE
#include "logged_array.hpp"
//#define LOG
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
#include "helper_cuda.h"
#include "math.h"
#include "tbb/concurrent_queue.h"
#include "AdaptativeUtils.hpp"
template <typename VertexType, typename EdgeType, typename Scalar>
int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_,
Scalar lambda,
int nTry, //algo parameter
util::timestamp& totaltime, std::string& out
)
{
int nb_blocks = 0;
int blk_size = 0;
int nb_threads = 0;
{
char* str = getenv ("NBTHREAD");
if (str) {
std::stringstream ss (str);
ss>>nb_threads;
if (!ss)
std::cerr<<"NBTHREAD invalid"<<std::endl;
}
}
{
char* str = getenv ("NBBLOCK");
if (str) {
std::stringstream ss (str);
ss>>nb_blocks;
if (!ss)
std::cerr<<"NBBLOCK invalid"<<std::endl;
}
}
{
char* str = getenv ("BLKSIZE");
if (str) {
std::stringstream ss (str);
ss>>blk_size;
if (!ss)
std::cerr<<"SUBSIZE invalid"<<std::endl;
}
}
if(nb_threads == 0 ){
std::cerr<<" NBTHREAD=??? "<<std::endl;
exit(0);
}
if(blk_size == 0 ){
std::cerr<<" BLKSIZE=??? "<<std::endl;
exit(0);
}
if(nb_blocks == 0 ){
std::cerr<<" NBBLOCK=??? "<<std::endl;
exit(0);
}
bool coldcache = true;
util::timestamp start(0,0);
//cpuside variables
Scalar* prin_ = new Scalar[nVtx];
EdgeType* xadj = xadj_;
VertexType *adj = adj_;
Scalar* val = val_;
Scalar* prior = prior_;
Scalar* prin = prin_;
Scalar* prout = pr_;
Scalar alpha = lambda;
Scalar beta = 1-lambda;
Scalar alpha1 = lambda;
Scalar beta1 = 1-lambda;
Scalar epsalpha = -1;
Scalar *h_eps0;
Scalar *h_eps1;
//cuda side variable
EdgeType* d_xadj0 ;
VertexType *d_adj0 ;
Scalar* d_val0 ;
Scalar* d_prior0 ;
Scalar* d_prin0 ;
Scalar* d_prout0 ;
Scalar *d_alpha0;
Scalar *d_beta0;
Scalar *d_epsalpha0;
Scalar *d_eps0;
EdgeType* d_xadj1 ;
VertexType *d_adj1 ;
Scalar* d_val1 ;
Scalar* d_prior1 ;
Scalar* d_prin1 ;
Scalar* d_prout1 ;
Scalar *d_alpha1;
Scalar *d_beta1;
Scalar *d_epsalpha1;
Scalar *d_eps1;
/* Get handle to the CUBLAS context */
cudaSetDevice(0);
cublasHandle_t cublasHandle0 = 0;
cublasStatus_t cublasStatus0;
cublasStatus0 = cublasCreate(&cublasHandle0);
cublasSetPointerMode(cublasHandle0, CUBLAS_POINTER_MODE_DEVICE);
checkCudaErrors( cudaSetDevice(1));
cublasHandle_t cublasHandle1 = 0;
cublasStatus_t cublasStatus1;
cublasStatus1 = cublasCreate(&cublasHandle1);
cublasSetPointerMode(cublasHandle1, CUBLAS_POINTER_MODE_DEVICE);
/* Get handle to the CUSPARSE context */
cudaSetDevice(0);
cusparseHandle_t cusparseHandle0 = 0;
cusparseStatus_t cusparseStatus0;
cusparseStatus0 = cusparseCreate(&cusparseHandle0);
cusparseMatDescr_t descr0 = 0;
cusparseStatus0 = cusparseCreateMatDescr(&descr0);
cusparseSetMatType(descr0,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr0,CUSPARSE_INDEX_BASE_ZERO);
cudaSetDevice(1);
cusparseHandle_t cusparseHandle1 = 0;
cusparseStatus_t cusparseStatus1;
cusparseStatus1 = cusparseCreate(&cusparseHandle1);
cusparseMatDescr_t descr1 = 0;
cusparseStatus1 = cusparseCreateMatDescr(&descr1);
cusparseSetMatType(descr1,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr1,CUSPARSE_INDEX_BASE_ZERO);
//cuda stream
cudaSetDevice(0);
cudaStream_t stream0;
cudaStreamCreate(&stream0);
cudaSetDevice(1);
cudaStream_t stream1;
cudaStreamCreate(&stream1);
//memalloc
cudaSetDevice(0);
checkCudaErrors( cudaMalloc((void**)&d_xadj0, (nVtx+1)*sizeof(*xadj)) );
checkCudaErrors( cudaMalloc((void**)&d_adj0, (xadj[nVtx])*sizeof(*adj)) );
checkCudaErrors( cudaMalloc((void**)&d_val0, (xadj[nVtx])*sizeof(*val)) );
checkCudaErrors( cudaMalloc((void**)&d_prior0, (nVtx*sizeof(*prior))));
checkCudaErrors( cudaMalloc((void**)&d_prin0, (nVtx*sizeof(*prin)) ));
checkCudaErrors( cudaMalloc((void**)&d_prout0, (nVtx*sizeof(*prout)) ));
checkCudaErrors( cudaMalloc((void**)&d_epsalpha0, (sizeof(epsalpha)) ));
checkCudaErrors( cudaMallocHost((void**)&h_eps0, (sizeof(*h_eps0)) ));
checkCudaErrors( cudaMalloc((void**)&d_eps0, (sizeof(*h_eps0)) ));
cudaSetDevice(1);
checkCudaErrors( cudaMalloc((void**)&d_xadj1, (nVtx+1)*sizeof(*xadj)) );
checkCudaErrors( cudaMalloc((void**)&d_adj1, (xadj[nVtx])*sizeof(*adj)) );
checkCudaErrors( cudaMalloc((void**)&d_val1, (xadj[nVtx])*sizeof(*val)) );
checkCudaErrors( cudaMalloc((void**)&d_prior1, (nVtx*sizeof(*prior))));
checkCudaErrors( cudaMalloc((void**)&d_prin1, (nVtx*sizeof(*prin)) ));
checkCudaErrors( cudaMalloc((void**)&d_prout1, (nVtx*sizeof(*prout)) ));
checkCudaErrors( cudaMalloc((void**)&d_epsalpha1, (sizeof(epsalpha)) ));
checkCudaErrors( cudaMallocHost((void**)&h_eps1, (sizeof(*h_eps1)) ));
checkCudaErrors( cudaMalloc((void**)&d_eps1, (sizeof(*h_eps1)) ));
//cpu to gpu copies
cudaSetDevice(0);
checkCudaErrors( cudaMemcpy(d_xadj0, xadj, (nVtx+1)*sizeof(*xadj), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_adj0, adj, (xadj[nVtx])*sizeof(*adj), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_val0, val, (xadj[nVtx])*sizeof(*val), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_prior0, prior, nVtx*sizeof(*prior), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_epsalpha0, &epsalpha, sizeof(epsalpha), cudaMemcpyHostToDevice) );
cudaSetDevice(1);
checkCudaErrors( cudaMemcpy(d_xadj1, xadj, (nVtx+1)*sizeof(*xadj), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_adj1, adj, (xadj[nVtx])*sizeof(*adj), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_val1, val, (xadj[nVtx])*sizeof(*val), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_prior1, prior, nVtx*sizeof(*prior), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_epsalpha1, &epsalpha, sizeof(epsalpha), cudaMemcpyHostToDevice) );
int nRows = nVtx;
unsigned long* rowBlocks;
const int nThreadPerBlock = nb_threads;
const unsigned int blkSize = blk_size;
const unsigned int blkMultiplier = 3;
const unsigned int rows_for_vector = 2;
const bool allocate_row_blocks = true;
//device 0 variable
unsigned long* d_rowBlocks0;
unsigned int* d_blkSize0;
unsigned int* d_rows_for_vector0;
unsigned int* d_blkMultiplier0;
float* d_a0;
float* d_b0;
//device 1 variable
unsigned long* d_rowBlocks1;
unsigned int* d_blkSize1;
unsigned int* d_rows_for_vector1;
unsigned int* d_blkMultiplier1;
float* d_a1;
float* d_b1;
//added for test
int *test_GPU0;
int *test_GPU1;
int rowBlockSize1;
int rowBlockSize2;
//compute rowBlockSize
rowBlockSize1 = ComputeRowBlocksSize<int,int>(xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock);
//cout << "rowBlockSize1 : " << rowBlockSize1 << endl;
//declare rowBlocks
rowBlocks = (unsigned long*) calloc(sizeof(unsigned long),rowBlockSize1);
//compute rowBlocks
ComputeRowBlocks<int,int>( rowBlocks, rowBlockSize2, xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock, allocate_row_blocks);
//cout << "rowBlockSize2 : " << rowBlockSize2 <<endl;
int end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
// cout << " end : " << end <<endl;
// if(end == 0){
// rowBlockSize1--;
// }
int mediumRowblocks = cutRowBlocks(rowBlocks, rowBlockSize1);
int part2 = rowBlockSize1 - mediumRowblocks;
// int medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL));
// end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
// cout << "rowBlockSize1 : " << rowBlockSize1 << endl;
// cout << "mediumRowBlocks :" << mediumRowblocks << endl;
// cout << " - medium : " << medium <<endl;
// cout << " - part2 = " << part2 << endl;
// cout << " - end : -- > " << end << endl;
int tab0[] = {1, 2, 3};
int tab1[] = {4, 5, 6};
cudaSetDevice(0);
//add for test
checkCudaErrors(cudaMalloc((void**)&test_GPU0, 3*sizeof(int)));
checkCudaErrors(cudaMemcpy(test_GPU0, tab0, 3*sizeof(int), cudaMemcpyHostToDevice));
cudaSetDevice(1);
checkCudaErrors(cudaMalloc((void**)&test_GPU1, 3*sizeof(int)));
checkCudaErrors( cudaMemcpy(test_GPU1, tab1, 3*sizeof(int), cudaMemcpyHostToDevice));
cudaSetDevice(0);
//malloc for device 0 variable
checkCudaErrors( cudaMalloc((void**)&d_rowBlocks0, (rowBlockSize1*sizeof(unsigned long))));
checkCudaErrors( cudaMalloc((void**)&d_blkSize0, 1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_rows_for_vector0,1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_blkMultiplier0, 1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_a0, 1*sizeof(float)));
checkCudaErrors( cudaMalloc((void**)&d_b0, 1*sizeof(float)));
//malloc for device 1 variable
cudaSetDevice(1);
checkCudaErrors( cudaMalloc((void**)&d_rowBlocks1, (rowBlockSize1*sizeof(unsigned long))));
checkCudaErrors( cudaMalloc((void**)&d_blkSize1, 1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_rows_for_vector1,1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_blkMultiplier1, 1*sizeof(unsigned int)));
checkCudaErrors( cudaMalloc((void**)&d_a1, 1*sizeof(float)));
checkCudaErrors( cudaMalloc((void**)&d_b1, 1*sizeof(float)));
//send data to device 0
cudaSetDevice(0);
checkCudaErrors( cudaMemcpy(d_rowBlocks0, rowBlocks, rowBlockSize1*sizeof(unsigned long), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_blkSize0, &blkSize, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_rows_for_vector0, &rows_for_vector, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_blkMultiplier0, &blkMultiplier, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_a0, &alpha, 1*sizeof(Scalar), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_b0, &beta, 1*sizeof(Scalar), cudaMemcpyHostToDevice) );
//send data to device 1
cudaSetDevice(1);
checkCudaErrors( cudaMemcpy(d_rowBlocks1, rowBlocks, rowBlockSize1*sizeof(unsigned long), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_blkSize1, &blkSize, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_rows_for_vector1, &rows_for_vector, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_blkMultiplier1, &blkMultiplier, 1*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_a1, &alpha, 1*sizeof(Scalar), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_b1, &beta, 1*sizeof(Scalar), cudaMemcpyHostToDevice) );
// prepare stream config
list<Task> *tasks = new list<Task>;
tbb::concurrent_bounded_queue<stream_container<int,int,float>* >* streams = new tbb::concurrent_bounded_queue<stream_container<int,int,float>* >;
int mmshared_size = (blkSize + 1) * sizeof(float);
// int nb_blocks = 512;
int stream_number = 2;
int X, subsize;
X = (int) rowBlockSize1/(nb_blocks) ;
if(X % 64 == 0){
subsize = X;
}else{
X = X / 64 ;
subsize = (X+1) * 64;
}
int xadjPtr1 = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL));
cout << "rowBlockSize : "<< rowBlockSize1 << "last row " << xadjPtr1 << endl;
cout << "subsize : "<< subsize << endl;
cout << "start creat stream " <<endl;
creat_stream_2gpus<int, int, float>(d_rowBlocks0, d_a0, d_b0, d_val0, d_xadj0, d_adj0, d_prin0, d_prout0, d_blkSize0, d_rows_for_vector0, d_blkMultiplier0, d_rowBlocks1, d_a1, d_b1, d_val1, d_xadj1, d_adj1, d_prin1, d_prout1, d_blkSize1, d_rows_for_vector1, d_blkMultiplier1, streams, stream_number );
cout << "end creat stream " <<endl;
cout << "start split task " <<endl;
int nb_tasks = split_input_to_tasks(rowBlocks, rowBlockSize1, subsize, *tasks);
cout << "fin split task " <<endl;
cout << "nb_tasks " << nb_tasks << endl;
//add for test
int *gpu0;
int *gpu1;
int size = (blkSize) * sizeof(float);
cudaSetDevice(1);
gpu1 = (int*) calloc(sizeof(int), 3);
cudaSetDevice(0);
gpu0 = (int*) calloc(sizeof(int), 3);
int medium;
for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY)
{
if (TRY >= THROW_AWAY)
start = util::timestamp();
int maxiter = 40;
medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL));
//for GPU0
cudaSetDevice(0);
//setup prin
//cudaMemcpyAsync(d_prin0, d_prior0, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0);
cudaMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0);
cudaSetDevice(1);
//setup prin
cudaMemcpyAsync(d_prin1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), cudaMemcpyDeviceToDevice,stream1);
cudaSetDevice(1);
checkCudaErrors( cudaStreamSynchronize(stream1));
cudaSetDevice(0);
checkCudaErrors( cudaStreamSynchronize(stream0));
for (int iter = 0; iter < maxiter ; ++ iter) {
int top = 0;
int bottom = nb_tasks;
cudaSetDevice(1);
cudaMemcpyAsync(d_prin1, d_prin0, (medium)*sizeof(*d_prin0), cudaMemcpyDeviceToDevice, stream1);
cudaSetDevice(0);
cudaMemcpyAsync(d_prin0+medium, d_prin1+medium, (nVtx-medium)*sizeof(*d_prin0), cudaMemcpyDeviceToDevice, stream0);
cudaSetDevice(0);
cudaMemcpyAsync(d_prout0, d_prior0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream0);
cudaSetDevice(1);
cudaMemcpyAsync(d_prout1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream1);
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaSetDevice(1);
cudaDeviceSynchronize();
int iteration = 0;
while(top < bottom){
iteration++;
//std::cout << " while : "<< std::endl;
stream_container<int, int, float> *current_stream;
streams->pop(current_stream);
if(current_stream->device == 0){
// std::cout << "0 top++ : " << top <<std::endl;
Task t = get_task(tasks, top++);
put_work_on_stream<int,int,float>(current_stream,t);
}else{
// std::cout << "1 bottom-- " << bottom << std::endl;
Task t = get_task(tasks, --bottom);
put_work_on_stream<int,int,float>(current_stream,t);
}
cudaSetDevice(current_stream->device);
cudaPrintError("befor kernel");
csr_adaptative<<<(current_stream->rowBlockSize + 1 ) , nThreadPerBlock, mmshared_size, current_stream->stream >>>(current_stream->d_val, current_stream->d_adj, current_stream->d_xadj, current_stream->d_prin, current_stream->d_prout, (current_stream->d_rowBlocks + current_stream->rowBlocksPtr ), current_stream->alpha, current_stream->beta, current_stream->d_blkSize, current_stream->d_blkMultiplier, current_stream->d_rows_for_vector, current_stream->rowBlockSize);
cudaPrintError("after kernel");
cudaStreamAddCallback(current_stream->stream, call_back , current_stream , 0);
if(current_stream->device == 1)
medium = ((rowBlocks[current_stream->rowBlocksPtr] >> (64-32)) & ((1UL << 32) - 1UL));;
cudaPrintError("after callbacj");
}
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaSetDevice(1);
cudaDeviceSynchronize();
//compute epsilon
//using prin to compute epsilon
cudaSetDevice(0);
cublasSetStream(cublasHandle0, stream0);
cublasSaxpy (cublasHandle0, medium, d_epsalpha0, d_prout0, 1, d_prin0, 1); // d_prin = d_prout*-1 + d_prin
cublasSasum (cublasHandle0, medium, d_prin0, 1, d_eps0);
cudaMemcpyAsync(h_eps0, d_eps0, sizeof(*d_eps0), cudaMemcpyDeviceToHost, stream0);
// cudaMemcpyAsync(d_prin0, d_prout0, nVtx*sizeof(*prout), cudaMemcpyDeviceToDevice, stream0);//prepare prin for next iteration
//compute epsilon
//using prin to compute epsilon
cudaSetDevice(1);
cublasSetStream(cublasHandle1, stream1);
cublasSaxpy (cublasHandle1, (nVtx-medium), d_epsalpha1, d_prout1+medium, 1, d_prin1+medium, 1); // d_prin = d_prout*-1 + d_prin
cublasSasum(cublasHandle1, nVtx-medium, d_prin1+medium, 1, d_eps1);
cudaMemcpyAsync(h_eps1, d_eps1, sizeof(*h_eps1), cudaMemcpyDeviceToHost, stream1);
cudaSetDevice(1);
cudaMemcpyAsync(d_prin1+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), cudaMemcpyDeviceToDevice,stream1);//prepare prin for next iteration
// checkCudaErrors(cudaMemcpy(gpu1, test_GPU1, 3*sizeof(int), cudaMemcpyDeviceToHost));
cudaSetDevice(0);
cudaMemcpyAsync(d_prin0, d_prout0, (medium)*sizeof(*prout), cudaMemcpyDeviceToDevice, stream0);//prepare prin for next iteration
// checkCudaErrors(cudaMemcpy(gpu0, test_GPU0, 3*sizeof(int), cudaMemcpyDeviceToHost));
cudaSetDevice(1);
checkCudaErrors( cudaStreamSynchronize(stream1));
cudaSetDevice(0);
checkCudaErrors( cudaStreamSynchronize(stream0));
//stopping condition
std::cerr<<*h_eps0<< " + " << *h_eps1<< " = " << *h_eps0+ *h_eps1 << std::endl;
//if (*h_eps0 +*h_eps1 < 0) // deactivated for testing purposes
// iter = maxiter;
// std::cerr << " GPU0=("<<gpu0[0]<< ", "<< gpu0[1]<< ", "<< gpu0[2]<<") ";
// std::cerr << " GPU1=("<<gpu1[0]<< ", "<< gpu1[1]<< ", "<< gpu1[2] << ")" << std::endl;
}
cudaSetDevice(0);
checkCudaErrors(cudaMemcpy(prout, d_prout0, nVtx*sizeof(*prout), cudaMemcpyDeviceToHost));
std::cerr<<" PR="<<prout[0]<< " " << prout[1] << " " << prout[2] << std::endl;
std::cerr<<endl;
if (TRY >= THROW_AWAY)
{
util::timestamp stop;
totaltime += stop - start;
}
}
cudaSetDevice(0);
cudaDeviceReset();
cudaSetDevice(1);
cudaDeviceReset();
delete[] prin_;
{
std::stringstream ss;
ss<<"part1V: "<< medium <<" part1E: "<<xadj[medium+1]
<<" part2V: "<<nVtx-(medium)<<" part2E: "<< xadj[nVtx] - xadj[medium+1];
out = ss.str();
}
return 0;
}
|
512f56112f254fcde1ee421df90aea4e9d0733b8.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
// #include <helper_cuda.h>
#include<cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
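/* The i < numElements guard above matters because main rounds blocksPerGrid up,
 * so the last block may contain threads past the end of the vectors. */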
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 20;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; i++) {
printf("%f ", h_C[i]);
}
printf("\n");
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
| 512f56112f254fcde1ee421df90aea4e9d0733b8.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// #include <helper_cuda.h>
#include<cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 20;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; i++) {
printf("%f ", h_C[i]);
}
printf("\n");
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
ae0464498a509fe3f4d07cf68a561a9fa22728ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by gautam on 07/05/20.
//
#include "sql_update.cuh"
#define NUM_THREADS 512
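// updateKernel: each thread walks a contiguous slice of roughly numRows/NUM_THREADS rows.
// For every row it evaluates the flattened WHERE expression (`exprs`) via eval(), which is
// assumed (from its use here, e.g. the free(res) that follows) to allocate `res` and report
// its type through `resType`; matching rows are copied into a scratch buffer, each SET
// expression in `uExprs` is evaluated against that copy, and the result is written back in place.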
__global__ void
updateKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows,
const int *uIds, myExpr *uExprs, int *uOffs, int numUpdates) {
void *res;
int resType = 1;
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *tempRow = malloc(rowSize);
void *row;
bool flag;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// update row here
memcpy(tempRow, row, rowSize);
for (int j = 0; j < numUpdates; ++j) {
const int col = uIds[j];
myExpr *uExpr = uExprs + uOffs[j];
eval(tempRow, offset, types, uExpr, res, resType);
switch (types[col].type) {
case TYPE_INT:{
// ASSERT RESULT HAS TO BE INT
if (resType == RESTYPE_INT) {
int *x = (int *) ((char *) tempRow + offset[col]);
*x = *(int *) res;
}
break;
}
case TYPE_FLOAT: {
// RESULT CAN BE INT OR FLOAT
if (resType == RESTYPE_INT) {
float *x = (float *) ((char *) tempRow + offset[col]);
*x = *(int *) res;
} else if (resType == RESTYPE_FLT) {
float *x = (float *) ((char *) tempRow + offset[col]);
*x = *(float *) res;
}
break;
}
case TYPE_VARCHAR: {
// RESULT HAS TO BE VARCHAR
if (resType < 0 && -resType <= types[col].size) {
char *x = (char *) tempRow + offset[col];
int resEnd = appendStr(x, (char *) res);
x[resEnd] = 0;
}
break;
}
default:
printf("Not implemented");
break;
}
}
memcpy(row, tempRow, rowSize);
    }
    // release the per-thread scratch row allocated from the device heap
    free(tempRow);
}
void sql_update::execute(std::string &query) {
hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query);
std::vector<std::string> columnNames;
std::string tableName;
if (result->isValid()) {
const auto *stmt = (const hsql::UpdateStatement *) result->getStatement(0);
tableName = stmt->table->name;
std::vector<myExpr> flattenedExpr;
Data d(tableName);
exprToVec(stmt->where, flattenedExpr, d.mdata.columns, d);
hipSetDevice(0);
hipDeviceReset();
int rowSize = d.mdata.rowSize;
void *data = malloc(d.chunkSize * rowSize);
void *data_d;
int numCols = d.mdata.columns.size();
ColType *type_d;
hipMalloc(&type_d, sizeof(ColType) * numCols);
hipMemcpy(type_d, &d.mdata.datatypes[0], sizeof(ColType) * numCols, hipMemcpyHostToDevice);
myExpr *where_d;
hipMalloc(&where_d, sizeof(myExpr) * flattenedExpr.size());
hipMemcpy(where_d, &flattenedExpr[0], sizeof(myExpr) * flattenedExpr.size(), hipMemcpyHostToDevice);
int *offsets = (int *) malloc(sizeof(int) * (numCols + 1));
offsets[0] = 0; //d.mdata.datatypes[0].size;
for (int i = 1; i <= numCols; i++) {
offsets[i] = offsets[i - 1] + d.mdata.datatypes[i - 1].size;
}
int *offsets_d;
hipMalloc(&offsets_d, sizeof(int) * (numCols + 1));
hipMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), hipMemcpyHostToDevice);
int numRows = d.read(data);
hipMalloc(&data_d, d.chunkSize * rowSize);
std::vector<std::vector<myExpr>> updateExprs(stmt->updates->size());
std::vector<int> colIds(stmt->updates->size());
for (int i = 0; i < stmt->updates->size(); ++i) {
hsql::UpdateClause *clause = stmt->updates->at(i);
colIds[i] = d.mdata.colMap[clause->column];
exprToVec(clause->value, updateExprs[i], d.mdata.columns, d);
}
int *updateIds_d;
hipMalloc(&updateIds_d, sizeof(int) * colIds.size());
hipMemcpy(updateIds_d, &colIds[0], sizeof(int) * colIds.size(), hipMemcpyHostToDevice);
myExpr *updateExprs_d;
int total = 0;
std::vector<int> updateOffsets(updateExprs.size());
for (int i = 0; i < updateExprs.size(); ++i) {
updateOffsets[i] = total;
total += updateExprs[i].size();
}
hipMalloc(&updateExprs_d, sizeof(myExpr) * total);
for (int i = 0; i < updateExprs.size(); ++i) {
hipMemcpy(updateExprs_d + updateOffsets[i], &updateExprs[i][0], sizeof(myExpr) * updateExprs[i].size(),
hipMemcpyHostToDevice);
}
int *updateOffsets_d;
hipMalloc(&updateOffsets_d, sizeof(int) * updateOffsets.size());
hipMemcpy(updateOffsets_d, &updateOffsets[0], sizeof(int) * updateOffsets.size(), hipMemcpyHostToDevice);
while (numRows > 0) {
hipMemcpy(data_d, data, rowSize * numRows, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( updateKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows, updateIds_d,
updateExprs_d, updateOffsets_d, colIds.size());
hipDeviceSynchronize();
// hipError_t err = hipGetLastError();
// if (err != hipSuccess) {
// printf("Error at %d: %s\n", __LINE__, hipGetErrorString(err));
// }
hipMemcpy(data, data_d, rowSize * numRows, hipMemcpyDeviceToHost);
d.write(data, numRows * d.mdata.rowSize);
numRows = d.read(data);
}
// Free all the data
free(data);
free(offsets);
hipFree(data_d);
hipFree(type_d);
hipFree(where_d);
        hipFree(offsets_d);
        hipFree(updateIds_d);
        hipFree(updateExprs_d);
        hipFree(updateOffsets_d);
} else {
printf("QUERY is invalid\n");
}
} | ae0464498a509fe3f4d07cf68a561a9fa22728ac.cu | //
// Created by gautam on 07/05/20.
//
#include "sql_update.cuh"
#define NUM_THREADS 512
__global__ void
updateKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows,
const int *uIds, myExpr *uExprs, int *uOffs, int numUpdates) {
void *res;
int resType = 1;
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
void *tempRow = malloc(rowSize);
void *row;
bool flag;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
row = (char *)data + i * rowSize;
eval(row, offset, types, exprs, res, resType);
flag = false;
if (resType == RESTYPE_INT) {
flag = *(int *) res != 0;
} else if (resType == RESTYPE_FLT) {
flag = *(float *) res != 0;
}
free(res);
if (!flag) continue;
// update row here
memcpy(tempRow, row, rowSize);
for (int j = 0; j < numUpdates; ++j) {
const int col = uIds[j];
myExpr *uExpr = uExprs + uOffs[j];
eval(tempRow, offset, types, uExpr, res, resType);
switch (types[col].type) {
case TYPE_INT:{
// ASSERT RESULT HAS TO BE INT
if (resType == RESTYPE_INT) {
int *x = (int *) ((char *) tempRow + offset[col]);
*x = *(int *) res;
}
break;
}
case TYPE_FLOAT: {
// RESULT CAN BE INT OR FLOAT
if (resType == RESTYPE_INT) {
float *x = (float *) ((char *) tempRow + offset[col]);
*x = *(int *) res;
} else if (resType == RESTYPE_FLT) {
float *x = (float *) ((char *) tempRow + offset[col]);
*x = *(float *) res;
}
break;
}
case TYPE_VARCHAR: {
// RESULT HAS TO BE VARCHAR
if (resType < 0 && -resType <= types[col].size) {
char *x = (char *) tempRow + offset[col];
int resEnd = appendStr(x, (char *) res);
x[resEnd] = 0;
}
break;
}
default:
printf("Not implemented");
break;
}
}
memcpy(row, tempRow, rowSize);
    }
    // release the per-thread scratch row allocated from the device heap
    free(tempRow);
}
void sql_update::execute(std::string &query) {
hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query);
std::vector<std::string> columnNames;
std::string tableName;
if (result->isValid()) {
const auto *stmt = (const hsql::UpdateStatement *) result->getStatement(0);
tableName = stmt->table->name;
std::vector<myExpr> flattenedExpr;
Data d(tableName);
exprToVec(stmt->where, flattenedExpr, d.mdata.columns, d);
cudaSetDevice(0);
cudaDeviceReset();
int rowSize = d.mdata.rowSize;
void *data = malloc(d.chunkSize * rowSize);
void *data_d;
int numCols = d.mdata.columns.size();
ColType *type_d;
cudaMalloc(&type_d, sizeof(ColType) * numCols);
cudaMemcpy(type_d, &d.mdata.datatypes[0], sizeof(ColType) * numCols, cudaMemcpyHostToDevice);
myExpr *where_d;
cudaMalloc(&where_d, sizeof(myExpr) * flattenedExpr.size());
cudaMemcpy(where_d, &flattenedExpr[0], sizeof(myExpr) * flattenedExpr.size(), cudaMemcpyHostToDevice);
int *offsets = (int *) malloc(sizeof(int) * (numCols + 1));
offsets[0] = 0; //d.mdata.datatypes[0].size;
for (int i = 1; i <= numCols; i++) {
offsets[i] = offsets[i - 1] + d.mdata.datatypes[i - 1].size;
}
int *offsets_d;
cudaMalloc(&offsets_d, sizeof(int) * (numCols + 1));
cudaMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), cudaMemcpyHostToDevice);
int numRows = d.read(data);
cudaMalloc(&data_d, d.chunkSize * rowSize);
std::vector<std::vector<myExpr>> updateExprs(stmt->updates->size());
std::vector<int> colIds(stmt->updates->size());
for (int i = 0; i < stmt->updates->size(); ++i) {
hsql::UpdateClause *clause = stmt->updates->at(i);
colIds[i] = d.mdata.colMap[clause->column];
exprToVec(clause->value, updateExprs[i], d.mdata.columns, d);
}
int *updateIds_d;
cudaMalloc(&updateIds_d, sizeof(int) * colIds.size());
cudaMemcpy(updateIds_d, &colIds[0], sizeof(int) * colIds.size(), cudaMemcpyHostToDevice);
myExpr *updateExprs_d;
int total = 0;
std::vector<int> updateOffsets(updateExprs.size());
for (int i = 0; i < updateExprs.size(); ++i) {
updateOffsets[i] = total;
total += updateExprs[i].size();
}
cudaMalloc(&updateExprs_d, sizeof(myExpr) * total);
for (int i = 0; i < updateExprs.size(); ++i) {
cudaMemcpy(updateExprs_d + updateOffsets[i], &updateExprs[i][0], sizeof(myExpr) * updateExprs[i].size(),
cudaMemcpyHostToDevice);
}
int *updateOffsets_d;
cudaMalloc(&updateOffsets_d, sizeof(int) * updateOffsets.size());
cudaMemcpy(updateOffsets_d, &updateOffsets[0], sizeof(int) * updateOffsets.size(), cudaMemcpyHostToDevice);
while (numRows > 0) {
cudaMemcpy(data_d, data, rowSize * numRows, cudaMemcpyHostToDevice);
updateKernel<<<1, NUM_THREADS>>>(data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows, updateIds_d,
updateExprs_d, updateOffsets_d, colIds.size());
cudaDeviceSynchronize();
// cudaError_t err = cudaGetLastError();
// if (err != cudaSuccess) {
// printf("Error at %d: %s\n", __LINE__, cudaGetErrorString(err));
// }
cudaMemcpy(data, data_d, rowSize * numRows, cudaMemcpyDeviceToHost);
d.write(data, numRows * d.mdata.rowSize);
numRows = d.read(data);
}
// Free all the data
free(data);
free(offsets);
cudaFree(data_d);
cudaFree(type_d);
cudaFree(where_d);
        cudaFree(offsets_d);
        cudaFree(updateIds_d);
        cudaFree(updateExprs_d);
        cudaFree(updateOffsets_d);
} else {
printf("QUERY is invalid\n");
}
} |
f820626ac2eb06aded24b25646b7c7f271b961ca.hip | // !!! This is a file automatically generated by hipify!!!
/**
 * @brief Betweenness Centrality test program
* @file
*/
#include "Static/BetweennessCentrality/bc.cuh"
#include "Static/BetweennessCentrality/exact_bc.cuh"
#include "Static/BetweennessCentrality/approximate_bc.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
using namespace std;
using namespace graph;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
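// Test driver (as set up below): build a Hornet graph from the host CSR arrays, pick the
// vertex with maximum out-degree as the BC root unless one is supplied as argv[2], then time
// a single BCCentrality run; the exact/approximate variants are kept behind `#if 0`.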
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
// GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
// graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
Timer<DEVICE> TM;
// graph.read(argv[1], SORT | PRINT_INFO);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
BCCentrality bc(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
// root = 226410;
cout << "Root is " << root << endl;
bc.reset();
bc.setRoot(root);
hipProfilerStart();TM.start();
bc.run();
TM.stop();hipProfilerStop();
TM.print("BCCentrality");
#if 0
// auto is_correct = bc.validate();
// std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
// return !is_correct;
ExactBC ebc(hornet_graph);
ebc.reset();
hipProfilerStart();TM.start();
// ebc.run();
TM.stop();hipProfilerStop();
TM.print("Exact BCCentrality");
vid_t numRoots=1000;
vid_t* roots = new vid_t[numRoots];
ApproximateBC::generateRandomRootsUniform(hornet_graph.nV(), numRoots, &roots, 1 );
ApproximateBC abc(hornet_graph, roots,numRoots);
abc.reset();
hipProfilerStart();TM.start();
// abc.run();
TM.stop();hipProfilerStop();
TM.print("Approximate BCCentrality");
delete[] roots;
#endif
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
#if defined(RMM_WRAPPER)
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
return ret;
}
| f820626ac2eb06aded24b25646b7c7f271b961ca.cu | /**
 * @brief Betweenness Centrality test program
* @file
*/
#include "Static/BetweennessCentrality/bc.cuh"
#include "Static/BetweennessCentrality/exact_bc.cuh"
#include "Static/BetweennessCentrality/approximate_bc.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
using namespace std;
using namespace graph;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
// GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
// graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
Timer<DEVICE> TM;
// graph.read(argv[1], SORT | PRINT_INFO);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
BCCentrality bc(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
// root = 226410;
cout << "Root is " << root << endl;
bc.reset();
bc.setRoot(root);
cudaProfilerStart();TM.start();
bc.run();
TM.stop();cudaProfilerStop();
TM.print("BCCentrality");
#if 0
// auto is_correct = bc.validate();
// std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
// return !is_correct;
ExactBC ebc(hornet_graph);
ebc.reset();
cudaProfilerStart();TM.start();
// ebc.run();
TM.stop();cudaProfilerStop();
TM.print("Exact BCCentrality");
vid_t numRoots=1000;
vid_t* roots = new vid_t[numRoots];
ApproximateBC::generateRandomRootsUniform(hornet_graph.nV(), numRoots, &roots, 1 );
ApproximateBC abc(hornet_graph, roots,numRoots);
abc.reset();
cudaProfilerStart();TM.start();
// abc.run();
TM.stop();cudaProfilerStop();
TM.print("Approximate BCCentrality");
delete[] roots;
#endif
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
#if defined(RMM_WRAPPER)
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
return ret;
}
|
bd40f1e516532d7bcab3df93c55550d60442b45c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg)
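// Note: despite its name, this wrapper forwards only `msg` and does not append __FILE__/__LINE__.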
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void resetZeros(int n, int *a) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
if (index >= n) return;
a[index] = 0;
}
__global__ void upSweep(int n, int d, int *idata) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
int twoPowd1 = 1 << (d + 1);
int twoPowd = 1 << d;
if ((index % twoPowd1 != twoPowd1-1) || index >= n) return;
int k = index - twoPowd1 + 1;
idata[index] += idata[k + twoPowd - 1];
}
__global__ void downSweep(int n, int d, int *idata) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
int twoPowd1 = 1 << (d + 1);
int twoPowd = 1 << d;
if ((index % twoPowd1 != twoPowd1 - 1) || index >= n) return;
int k = index - twoPowd1 + 1;
int t = idata[k + twoPowd - 1];
idata[k + twoPowd - 1] = idata[index];
idata[index] += t;
}
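        // Illustrative trace of the two kernels above for an exclusive (Blelloch) scan on a
        // power-of-two array, included only as documentation of the intended behaviour:
        //   input                  [1 2 3 4 5 6  7  8]
        //   up-sweep d=0..2     -> [1 3 3 10 5 11 7 36]   (last element holds the total)
        //   zero the last element  [1 3 3 10 5 11 7  0]
        //   down-sweep d=2..0   -> [0 1 3 6 10 15 21 28]  (exclusive prefix sums)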
void printxxx(int n, const int *a) {
for (int i = 0; i < n; i++) {
printf("%d ", a[i]);
}
printf("\n\n\n");
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
bool exception = false;
int *dev_idata;
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int d_max = ilog2ceil(n);
int twoPowN = 1 << d_max;
if (n != twoPowN) {
int diff = twoPowN - n;
hipMalloc((void **)&dev_idata, (n + diff) * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata1 failed!");
resetZeros << <numBlocks, numThreads >> > (n + diff, dev_idata);
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
n = n + diff;
} else {
hipMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
}
try {
timer().startGpuTimer();
}
catch (const std::runtime_error& ex) {
exception = true;
}
for (int d = 0; d < d_max; d++) {
hipLaunchKernelGGL(( upSweep), dim3(numBlocks), dim3(numThreads), 0, 0, n, d, dev_idata);
}
// reset last element to zero
            int zero = 0;
            hipMemcpy(dev_idata + n - 1, &zero, sizeof(int), hipMemcpyHostToDevice);
for(int d = d_max-1; d >= 0; d--) {
downSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
if (!exception)
timer().endGpuTimer();
hipMemcpy(odata, dev_idata, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_idata);
}
void scanCompact(int n, int *odata, const int *idata) {
bool exception = false;
int *dev_idata;
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int d_max = ilog2ceil(n);
int twoPowN = 1 << d_max;
if (n != twoPowN) {
int diff = twoPowN - n;
hipMalloc((void **)&dev_idata, (n + diff) * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata1 failed!");
resetZeros << <numBlocks, numThreads >> > (n + diff, dev_idata);
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyDeviceToDevice);
n = n + diff;
}
else {
hipMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyDeviceToDevice);
}
try {
timer().startGpuTimer();
}
catch (const std::runtime_error& ex) {
exception = true;
}
for (int d = 0; d < d_max; d++) {
upSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
// reset last element to zero
            int zero = 0;
            hipMemcpy(dev_idata + n - 1, &zero, sizeof(int), hipMemcpyHostToDevice);
for (int d = d_max - 1; d >= 0; d--) {
downSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
if (!exception)
timer().endGpuTimer();
hipMemcpy(odata, dev_idata, n * sizeof(int), hipMemcpyDeviceToDevice);
hipFree(dev_idata);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int *dev_checkZeros, *dev_sumIndices, *dev_odata, *dev_idata;
hipMalloc((void **) &dev_checkZeros, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_checkZeros failed!");
hipMalloc((void **) &dev_sumIndices, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_sumIndices failed!");
hipMalloc((void **)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
hipMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
timer().startGpuTimer();
hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(numBlocks), dim3(numThreads), 0, 0, n, dev_checkZeros, dev_idata);
int *checkZeros = new int[n];
hipMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), hipMemcpyDeviceToHost);
int *sumIndices = new int[n];
scan(n, sumIndices, checkZeros);
hipMemcpy(dev_sumIndices, sumIndices , n * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(numBlocks), dim3(numThreads), 0, 0, n, dev_odata, dev_idata, dev_checkZeros, dev_sumIndices);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
int count = checkZeros[n - 1] == 0 ? sumIndices[n - 1] : sumIndices[n - 1] + 1;
            delete[] checkZeros;
            delete[] sumIndices;
hipFree(dev_idata);
hipFree(dev_odata);
hipFree(dev_checkZeros);
hipFree(dev_sumIndices);
return count;
}
//int compact(int n, int *odata, const int *idata) {
// int numThreads = 128;
// int numBlocks = (n + numThreads - 1) / numThreads;
// int *dev_checkZeros, *dev_sumIndices, *dev_odata, *dev_idata;
// hipMalloc((void **)&dev_checkZeros, n * sizeof(int));
// checkCUDAErrorWithLine("hipMalloc dev_checkZeros failed!");
// hipMalloc((void **)&dev_sumIndices, n * sizeof(int));
// checkCUDAErrorWithLine("hipMalloc dev_sumIndices failed!");
// hipMalloc((void **)&dev_odata, n * sizeof(int));
// checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
// hipMalloc((void **)&dev_idata, n * sizeof(int));
// checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
// hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
// timer().startGpuTimer();
// StreamCompaction::Common::kernMapToBoolean << <numBlocks, numThreads >> > (n, dev_checkZeros, dev_idata);
// //int *checkZeros = new int[n];
// //hipMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), hipMemcpyDeviceToHost);
// //int *sumIndices = new int[n];
// scanCompact(n, dev_sumIndices, dev_checkZeros);
// //hipMemcpy(dev_sumIndices, sumIndices, n * sizeof(int), hipMemcpyHostToDevice);
// StreamCompaction::Common::kernScatter << <numBlocks, numThreads >> > (n, dev_odata, dev_idata, dev_checkZeros, dev_sumIndices);
// timer().endGpuTimer();
// hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
// int *sumIndices = new int[n];
// int *checkZeros = new int[n];
// hipMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(sumIndices, dev_sumIndices, n * sizeof(int), hipMemcpyDeviceToHost);
// int count = checkZeros[n - 1] == 0 ? sumIndices[n - 1] : sumIndices[n - 1] + 1;
// //delete[] checkZeros;
// //delete[] sumIndices;
// //printf("hey\n");
// hipFree(dev_idata);
// hipFree(dev_odata);
// hipFree(dev_checkZeros);
// hipFree(dev_sumIndices);
// delete[] sumIndices;
// delete[] checkZeros;
// return count;
//}
}
}
| bd40f1e516532d7bcab3df93c55550d60442b45c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg)
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void resetZeros(int n, int *a) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
if (index >= n) return;
a[index] = 0;
}
__global__ void upSweep(int n, int d, int *idata) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
int twoPowd1 = 1 << (d + 1);
int twoPowd = 1 << d;
if ((index % twoPowd1 != twoPowd1-1) || index >= n) return;
int k = index - twoPowd1 + 1;
idata[index] += idata[k + twoPowd - 1];
}
__global__ void downSweep(int n, int d, int *idata) {
int index = (blockDim.x*blockIdx.x) + threadIdx.x;
int twoPowd1 = 1 << (d + 1);
int twoPowd = 1 << d;
if ((index % twoPowd1 != twoPowd1 - 1) || index >= n) return;
int k = index - twoPowd1 + 1;
int t = idata[k + twoPowd - 1];
idata[k + twoPowd - 1] = idata[index];
idata[index] += t;
}
void printxxx(int n, const int *a) {
for (int i = 0; i < n; i++) {
printf("%d ", a[i]);
}
printf("\n\n\n");
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
bool exception = false;
int *dev_idata;
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int d_max = ilog2ceil(n);
int twoPowN = 1 << d_max;
if (n != twoPowN) {
int diff = twoPowN - n;
cudaMalloc((void **)&dev_idata, (n + diff) * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata1 failed!");
resetZeros << <numBlocks, numThreads >> > (n + diff, dev_idata);
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
n = n + diff;
} else {
cudaMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
}
try {
timer().startGpuTimer();
}
catch (const std::runtime_error& ex) {
exception = true;
}
for (int d = 0; d < d_max; d++) {
upSweep<<<numBlocks, numThreads>>>(n, d, dev_idata);
}
// reset last element to zero
            int zero = 0;
            cudaMemcpy(dev_idata + n - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
for(int d = d_max-1; d >= 0; d--) {
downSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
if (!exception)
timer().endGpuTimer();
cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_idata);
}
void scanCompact(int n, int *odata, const int *idata) {
bool exception = false;
int *dev_idata;
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int d_max = ilog2ceil(n);
int twoPowN = 1 << d_max;
if (n != twoPowN) {
int diff = twoPowN - n;
cudaMalloc((void **)&dev_idata, (n + diff) * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata1 failed!");
resetZeros << <numBlocks, numThreads >> > (n + diff, dev_idata);
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyDeviceToDevice);
n = n + diff;
}
else {
cudaMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyDeviceToDevice);
}
try {
timer().startGpuTimer();
}
catch (const std::runtime_error& ex) {
exception = true;
}
for (int d = 0; d < d_max; d++) {
upSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
// reset last element to zero
            int zero = 0;
            cudaMemcpy(dev_idata + n - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
for (int d = d_max - 1; d >= 0; d--) {
downSweep << <numBlocks, numThreads >> > (n, d, dev_idata);
}
if (!exception)
timer().endGpuTimer();
cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToDevice);
cudaFree(dev_idata);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int numThreads = 128;
int numBlocks = (n + numThreads - 1) / numThreads;
int *dev_checkZeros, *dev_sumIndices, *dev_odata, *dev_idata;
cudaMalloc((void **) &dev_checkZeros, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_checkZeros failed!");
cudaMalloc((void **) &dev_sumIndices, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_sumIndices failed!");
cudaMalloc((void **)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
cudaMalloc((void **)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
timer().startGpuTimer();
StreamCompaction::Common::kernMapToBoolean<<<numBlocks, numThreads>>>(n, dev_checkZeros, dev_idata);
int *checkZeros = new int[n];
cudaMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), cudaMemcpyDeviceToHost);
int *sumIndices = new int[n];
scan(n, sumIndices, checkZeros);
cudaMemcpy(dev_sumIndices, sumIndices , n * sizeof(int), cudaMemcpyHostToDevice);
StreamCompaction::Common::kernScatter<<<numBlocks, numThreads>>>(n, dev_odata, dev_idata, dev_checkZeros, dev_sumIndices);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
int count = checkZeros[n - 1] == 0 ? sumIndices[n - 1] : sumIndices[n - 1] + 1;
            delete[] checkZeros;
            delete[] sumIndices;
cudaFree(dev_idata);
cudaFree(dev_odata);
cudaFree(dev_checkZeros);
cudaFree(dev_sumIndices);
return count;
}
//int compact(int n, int *odata, const int *idata) {
// int numThreads = 128;
// int numBlocks = (n + numThreads - 1) / numThreads;
// int *dev_checkZeros, *dev_sumIndices, *dev_odata, *dev_idata;
// cudaMalloc((void **)&dev_checkZeros, n * sizeof(int));
// checkCUDAErrorWithLine("cudaMalloc dev_checkZeros failed!");
// cudaMalloc((void **)&dev_sumIndices, n * sizeof(int));
// checkCUDAErrorWithLine("cudaMalloc dev_sumIndices failed!");
// cudaMalloc((void **)&dev_odata, n * sizeof(int));
// checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
// cudaMalloc((void **)&dev_idata, n * sizeof(int));
// checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
// cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
// timer().startGpuTimer();
// StreamCompaction::Common::kernMapToBoolean << <numBlocks, numThreads >> > (n, dev_checkZeros, dev_idata);
// //int *checkZeros = new int[n];
// //cudaMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), cudaMemcpyDeviceToHost);
// //int *sumIndices = new int[n];
// scanCompact(n, dev_sumIndices, dev_checkZeros);
// //cudaMemcpy(dev_sumIndices, sumIndices, n * sizeof(int), cudaMemcpyHostToDevice);
// StreamCompaction::Common::kernScatter << <numBlocks, numThreads >> > (n, dev_odata, dev_idata, dev_checkZeros, dev_sumIndices);
// timer().endGpuTimer();
// cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
// int *sumIndices = new int[n];
// int *checkZeros = new int[n];
// cudaMemcpy(checkZeros, dev_checkZeros, n * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(sumIndices, dev_sumIndices, n * sizeof(int), cudaMemcpyDeviceToHost);
// int count = checkZeros[n - 1] == 0 ? sumIndices[n - 1] : sumIndices[n - 1] + 1;
// //delete[] checkZeros;
// //delete[] sumIndices;
// //printf("hey\n");
// cudaFree(dev_idata);
// cudaFree(dev_odata);
// cudaFree(dev_checkZeros);
// cudaFree(dev_sumIndices);
// delete[] sumIndices;
// delete[] checkZeros;
// return count;
//}
}
}
|
fcbbc9046cf0514baaded5465bfc26b14cad2b5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellz;
int xdim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellz;
int ydim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellz;
int xdim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellz;
int ydim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellz;
int xdim2_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellz;
int ydim2_initialise_chunk_kernel_cellz_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellz * (y) + \
xdim0_initialise_chunk_kernel_cellz * ydim0_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellz * (y) + \
xdim1_initialise_chunk_kernel_cellz * ydim1_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellz * (y) + \
xdim2_initialise_chunk_kernel_cellz * ydim2_initialise_chunk_kernel_cellz * \
(z))
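// The OPS_ACCn(x, y, z) macros flatten a relative 3-D stencil offset into a 1-D element index
// using the per-dat pitches (xdimN, ydimN) copied into constant memory by the host stub below,
// i.e. index = x + xdimN*y + xdimN*ydimN*z, applied to a pointer already shifted to the
// current grid point.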
// user function
__device__
void
initialise_chunk_kernel_cellz(const double *vertexz, double *cellz,
double *celldz) {
double d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
cellz[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexz[OPS_ACC0(0, 0, 0)] + vertexz[OPS_ACC0(0, 0, 1)]);
celldz[OPS_ACC2(0, 0, 0)] = d_z;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellz(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_cellz *
ydim0_initialise_chunk_kernel_cellz;
arg1 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim1_initialise_chunk_kernel_cellz *
ydim1_initialise_chunk_kernel_cellz;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_cellz *
ydim2_initialise_chunk_kernel_cellz;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellz(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_cellz(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "initialise_chunk_kernel_cellz");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellz_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellz_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellz_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellz_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellz_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellz_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellz, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellz_h = xdim0;
hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellz, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellz_h = ydim0;
hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellz, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellz_h = xdim1;
hipMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellz, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellz_h = ydim1;
hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellz, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellz_h = xdim2;
hipMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellz, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellz_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
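  // For each argument, compute the byte offset of the first point of the local iteration range
  // inside the base- and halo-adjusted dat (dat0/1/2 are element sizes, args[i].dat->size[*]
  // the padded pitches) and shift the device pointer accordingly.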
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_cellz), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
| fcbbc9046cf0514baaded5465bfc26b14cad2b5d.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_cellz;
int xdim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_cellz;
int ydim0_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_cellz;
int xdim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_cellz;
int ydim1_initialise_chunk_kernel_cellz_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_cellz;
int xdim2_initialise_chunk_kernel_cellz_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_cellz;
int ydim2_initialise_chunk_kernel_cellz_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_cellz * (y) + \
xdim0_initialise_chunk_kernel_cellz * ydim0_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_cellz * (y) + \
xdim1_initialise_chunk_kernel_cellz * ydim1_initialise_chunk_kernel_cellz * \
(z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_cellz * (y) + \
xdim2_initialise_chunk_kernel_cellz * ydim2_initialise_chunk_kernel_cellz * \
(z))
// user function
__device__
void
initialise_chunk_kernel_cellz(const double *vertexz, double *cellz,
double *celldz) {
double d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
cellz[OPS_ACC1(0, 0, 0)] =
0.5 * (vertexz[OPS_ACC0(0, 0, 0)] + vertexz[OPS_ACC0(0, 0, 1)]);
celldz[OPS_ACC2(0, 0, 0)] = d_z;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_cellz(const double *__restrict arg0,
double *__restrict arg1,
double *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim0_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_cellz *
ydim0_initialise_chunk_kernel_cellz;
arg1 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim1_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim1_initialise_chunk_kernel_cellz *
ydim1_initialise_chunk_kernel_cellz;
arg2 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim2_initialise_chunk_kernel_cellz +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_cellz *
ydim2_initialise_chunk_kernel_cellz;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_cellz(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_cellz(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 54))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(54, "initialise_chunk_kernel_cellz");
OPS_kernels[54].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_cellz_h ||
ydim0 != ydim0_initialise_chunk_kernel_cellz_h ||
xdim1 != xdim1_initialise_chunk_kernel_cellz_h ||
ydim1 != ydim1_initialise_chunk_kernel_cellz_h ||
xdim2 != xdim2_initialise_chunk_kernel_cellz_h ||
ydim2 != ydim2_initialise_chunk_kernel_cellz_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_cellz, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_cellz_h = xdim0;
cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_cellz, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_cellz_h = ydim0;
cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_cellz, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_cellz_h = xdim1;
cudaMemcpyToSymbol(ydim1_initialise_chunk_kernel_cellz, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_cellz_h = ydim1;
cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_cellz, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_cellz_h = xdim2;
cudaMemcpyToSymbol(ydim2_initialise_chunk_kernel_cellz, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_cellz_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_cellz<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[54].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[54].mpi_time += t2 - t1;
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
c286428b88b6da8551d9171b373c62ca8219b5dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "ztrtri.cuh"
#include "ztrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
ztrtri_diag_upper_kernel(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part3_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
| c286428b88b6da8551d9171b373c62ca8219b5dd.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "ztrtri.cuh"
#include "ztrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
ztrtri_diag_upper_kernel(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm16_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm32_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
/******************************************************************************/
__global__ void
triple_zgemm_above64_part3_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
|
f2edf516fd73e69cce7d8e38f6b4548d915a00f4.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
// includes, project
#include <hip/hip_runtime.h>
// includes, kernels
#include "srad_kernel.hip"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
hipMalloc((void**)& J_cuda, sizeof(float)* size_I);
hipMalloc((void**)& C_cuda, sizeof(float)* size_I);
hipMalloc((void**)& E_C, sizeof(float)* size_I);
hipMalloc((void**)& W_C, sizeof(float)* size_I);
hipMalloc((void**)& S_C, sizeof(float)* size_I);
hipMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
// directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
// saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
// diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice);
//Run kernels
hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost);
#endif
}
hipDeviceSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
hipFree(C_cuda);
hipFree(J_cuda);
hipFree(E_C);
hipFree(W_C);
hipFree(N_C);
hipFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
| f2edf516fd73e69cce7d8e38f6b4548d915a00f4.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "srad.h"
// includes, project
#include <cuda.h>
// includes, kernels
#include "srad_kernel.cu"
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float *dN,*dS,*dW,*dE;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
float *J_cuda;
float *C_cuda;
float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
cudaMalloc((void**)& J_cuda, sizeof(float)* size_I);
cudaMalloc((void**)& C_cuda, sizeof(float)* size_I);
cudaMalloc((void**)& E_C, sizeof(float)* size_I);
cudaMalloc((void**)& W_C, sizeof(float)* size_I);
cudaMalloc((void**)& S_C, sizeof(float)* size_I);
cudaMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
// directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
// saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
// diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
cudaMemcpy(J_cuda, J, sizeof(float) * size_I, cudaMemcpyHostToDevice);
//Run kernels
srad_cuda_1<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr);
srad_cuda_2<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr);
//Copy data from device memory to main memory
cudaMemcpy(J, J_cuda, sizeof(float) * size_I, cudaMemcpyDeviceToHost);
#endif
}
cudaThreadSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
free(dN); free(dS); free(dW); free(dE);
#endif
#ifdef GPU
cudaFree(C_cuda);
cudaFree(J_cuda);
cudaFree(E_C);
cudaFree(W_C);
cudaFree(N_C);
cudaFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
|
e367039d99c87a9253f96cf3baa1fbcb968f0c4a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA (Powered by HexTech)
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Allocate CUDA events for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
printf("done\n");
hipDeviceSynchronize();
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("OK\n");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
return EXIT_SUCCESS;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| e367039d99c87a9253f96cf3baa1fbcb968f0c4a.cu | #include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA (Powered by HexTech)
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Allocate CUDA events for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
printf("done\n");
cudaDeviceSynchronize();
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("OK\n");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
return EXIT_SUCCESS;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.