hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
c0b9ad6612c101afa4ca5d28941dbcbc33e2c69b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
// ----------------------------------------------------------------------------------------
// Transpose
//
// This file contains both device and host code for transposing a floating-point
// matrix. It performs several transpose kernels, which incrementally improve performance
// through coalescing, removing shared memory bank conflicts, and eliminating partition
// camping. Several of the kernels perform a copy, used to represent the best case
// performance that a transpose can achieve.
//
// Please see the whitepaper in the docs folder of the transpose project for a detailed
// description of this performance study.
// ----------------------------------------------------------------------------------------
// Utilities and system includes
#include <helper_string.h> // helper for string parsing
#include <helper_image.h> // helper for image and data comparison
#include <helper_cuda.h> // helper for cuda error checking functions
const char *sSDKsample = "Transpose";
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
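// Example: with TILE_DIM = 16 and BLOCK_ROWS = 16 each thread handles a single
// element per tile; with BLOCK_ROWS = 8 each thread would handle TILE_DIM/BLOCK_ROWS = 2 elements.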
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
int MATRIX_SIZE_X = 1024;
int MATRIX_SIZE_Y = 1024;
int MUL_FACTOR = TILE_DIM;
#define FLOOR(a,b) (a-(a%b))
// Compute the tile size necessary to illustrate performance cases for SM20+ hardware
int MAX_TILES = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM);
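// Example: for the default 1024x1024 matrix, FLOOR(1024,512) = 1024, so
// MAX_TILES = (1024 * 1024) / (16 * 16) = 4096.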
// Number of repetitions used for timing. Two sets of repetitions are performed:
// 1) over kernel launches and 2) inside the kernel over just the loads and stores
#define NUM_REPS 1
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float *idata, int width, int height)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index+i*width] = idata[index+i*width];
}
}
__global__ void copySharedMem(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
{
tile[threadIdx.y][threadIdx.x] = idata[index];
}
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
{
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float *idata, int width, int height)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i] = idata[index_in+i*width];
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
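// The extra padding column skews each tile row so that the column-wise reads in
// the write-out loop below fall into different shared memory banks, removing the
// bank conflicts present in transposeCoalesced.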
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Transpose that effectively reorders execution of thread blocks along diagonals of the
// matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as
// corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly
// used cartesian coordinates so that the only changes to the code from the coalesced version
// are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and
// blockIdx.y with the subscripted versions in the remaining code
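//
// Worked example (square case, 4x4 block grid): launch coordinates
// (blockIdx.x, blockIdx.y) = (1, 2) map to blockIdx_y = 1 and
// blockIdx_x = (1 + 2) % 4 = 3, so all blocks sharing blockIdx.y = 2 land on the
// same wrapped diagonal of the matrix.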
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
}
else
{
int bid = blockIdx.x + gridDim.x*blockIdx.y;
blockIdx_y = bid%gridDim.y;
blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
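// In this implementation, transposeFineGrained transposes the data within each tile
// but writes the tile back to its original location, while transposeCoarseGrained
// writes each tile to its transposed location without reordering the elements
// inside the tile.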
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS)
{
odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float *gold, float *idata,
const int size_x, const int size_y)
{
for (int y = 0; y < size_y; ++y)
{
for (int x = 0; x < size_x; ++x)
{
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
void getParams(int argc, char **argv, hipDeviceProp_t &deviceProp, int &size_x, int &size_y, int max_tile_dim)
{
// set matrix size (if the (x,y) dims of the matrix are not square, then this will have to be modified)
if (checkCmdLineFlag(argc, (const char **)argv, "dimX"))
{
size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX");
if (size_x > max_tile_dim)
{
printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim);
}
else
{
printf("> MatrixSize X = %d\n", size_x);
}
}
else
{
size_x = max_tile_dim;
size_x = FLOOR(size_x, 512);
}
if (checkCmdLineFlag(argc, (const char **)argv, "dimY"))
{
size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY");
if (size_y > max_tile_dim)
{
printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim);
}
else
{
printf("> MatrixSize Y = %d\n", size_y);
}
}
else
{
size_y = max_tile_dim;
// If this is SM 1.2 or later hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1)
{
size_y = FLOOR(size_y, 512);
}
else // else for SM10,SM11 we round down to a multiple of 384
{
size_y = FLOOR(size_y, 384);
}
}
}
void
showHelp()
{
printf("\n%s : Command line options\n", sSDKsample);
printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n");
printf("> The default matrix size can be overridden with these parameters\n");
printf("\t-dimX=row_dim_size (matrix row dimensions)\n");
printf("\t-dimY=col_dim_size (matrix column dimensions)\n");
}
// ----
// main
// ----
int
main(int argc, char **argv)
{
// Start logs
printf("%s Starting...\n\n", sSDKsample);
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
showHelp();
return 0;
}
int devID = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
// get number of SMs on this GPU
checkCudaErrors(hipGetDevice(&devID));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// compute the scaling factor (for GPUs with fewer MPs)
float scale_factor, total_tiles;
scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
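// Illustration (hypothetical devices): 2 SMs with 96 cores each give 192 cores total,
// so scale_factor = max(192/192, 1) = 1; a 96-core device would give scale_factor = 2,
// halving the tile count used below.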
printf("> Device %d: \"%s\"\n", devID, deviceProp.name);
printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor);
// Calculate number of tiles we will run for the Matrix Transpose performance tests
int size_x, size_y, max_matrix_dim, matrix_size_test;
matrix_size_test = 512; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES / scale_factor;
max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test);
// This is the minimum size allowed
if (max_matrix_dim == 0)
{
max_matrix_dim = matrix_size_test;
}
printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf("> Compute performance scaling factor = %4.2f\n", scale_factor);
// Extract parameters if there are any; command line -dimX and -dimY can override
// any of these settings
getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim);
if (size_x != size_y)
{
printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0)
{
printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int);
const char *kernelName;
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
if (grid.x < 1 || grid.y < 1)
{
printf("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
// CUDA events
hipEvent_t start, stop;
// size of memory required to store the matrix
size_t mem_size = static_cast<size_t>(sizeof(float) * size_x*size_y);
if (2*mem_size > deviceProp.totalGlobalMem)
{
printf("Input matrix size is larger than the available device memory!\n");
printf("Please choose a smaller size matrix\n");
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
// allocate host memory
float *h_idata = (float *) malloc(mem_size);
float *h_odata = (float *) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
checkCudaErrors(hipMalloc((void **) &d_idata, mem_size));
checkCudaErrors(hipMalloc((void **) &d_odata, mem_size));
// initialize host data
for (int i = 0; i < (size_x*size_y); ++i)
{
h_idata[i] = (float) i;
}
// copy host data to device
checkCudaErrors(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice));
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n",
size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
// initialize events
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//
// loop over different kernels
//
bool success = true;
/* for (int k = 0; k<8; k++)
{
// set kernel pointer
switch (k)
{
case 0:
kernel = &copy;
kernelName = "simple copy ";
break;
case 1:
kernel = &copySharedMem;
kernelName = "shared memory copy";
break;
case 2:
kernel = &transposeNaive;
kernelName = "naive ";
break;
case 3:
kernel = &transposeCoalesced;
kernelName = "coalesced ";
break;
case 4:
kernel = &transposeNoBankConflicts;
kernelName = "optimized ";
break;
case 5:
kernel = &transposeCoarseGrained;
kernelName = "coarse-grained ";
break;
case 6:
kernel = &transposeFineGrained;
kernelName = "fine-grained ";
break;
case 7:
kernel = &transposeDiagonal;
kernelName = "diagonal ";
break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem)
{
gold = h_idata;
}
else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained)
{
gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check
}
else
{
gold = transposeGold;
} */
kernel = &transposeNoBankConflicts;
kernelName = "optimized ";
gold = transposeGold;
// Clear error status
checkCudaErrors(hipGetLastError());
// Charu: warmup to avoid timing startup
//hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y);
// take measurements for loop over kernel launches
checkCudaErrors(hipEventRecord(start, 0));
for (int i=0; i < NUM_REPS; i++)
{
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y);
// Ensure no launch failure
checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
float kernelTime;
checkCudaErrors(hipEventElapsedTime(&kernelTime, start, stop));
checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost));
bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false)
{
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// take measurements for loop inside kernel
checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost));
res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false)
{
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// report effective bandwidths
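// Factor of 2: each matrix element is read once and written once; 1000 converts
// the event time from ms to s, and dividing mem_size by 1024^3 reports GB/s.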
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS);
printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
kernelBandwidth,
kernelTime/NUM_REPS,
(size_x *size_y), 1, TILE_DIM *BLOCK_ROWS);
// }
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
hipFree(d_idata);
hipFree(d_odata);
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
if (!success)
{
printf("Test failed!\n");
// exit(EXIT_FAILURE);
}
else
printf("Test passed\n");
// exit(EXIT_SUCCESS);
}
| c0b9ad6612c101afa4ca5d28941dbcbc33e2c69b.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
// ----------------------------------------------------------------------------------------
// Transpose
//
// This file contains both device and host code for transposing a floating-point
// matrix. It performs several transpose kernels, which incrementally improve performance
// through coalescing, removing shared memory bank conflicts, and eliminating partition
// camping. Several of the kernels perform a copy, used to represent the best case
// performance that a transpose can achieve.
//
// Please see the whitepaper in the docs folder of the transpose project for a detailed
// description of this performance study.
// ----------------------------------------------------------------------------------------
// Utilities and system includes
#include <helper_string.h> // helper for string parsing
#include <helper_image.h> // helper for image and data comparison
#include <helper_cuda.h> // helper for cuda error checking functions
const char *sSDKsample = "Transpose";
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
int MATRIX_SIZE_X = 1024;
int MATRIX_SIZE_Y = 1024;
int MUL_FACTOR = TILE_DIM;
#define FLOOR(a,b) (a-(a%b))
// Compute the tile size necessary to illustrate performance cases for SM20+ hardware
int MAX_TILES = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM);
// Number of repetitions used for timing. Two sets of repetitions are performed:
// 1) over kernel launches and 2) inside the kernel over just the loads and stores
#define NUM_REPS 1
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float *idata, int width, int height)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index+i*width] = idata[index+i*width];
}
}
__global__ void copySharedMem(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
{
tile[threadIdx.y][threadIdx.x] = idata[index];
}
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
{
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float *idata, int width, int height)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i] = idata[index_in+i*width];
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Transpose that effectively reorders execution of thread blocks along diagonals of the
// matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as
// corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly
// used cartesian coordinates so that the only changes to the code from the coalesced version
// are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and
// blockIdx.y with the subscripted versions in the remaining code
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
}
else
{
int bid = blockIdx.x + gridDim.x*blockIdx.y;
blockIdx_y = bid%gridDim.y;
blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS)
{
odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float *gold, float *idata,
const int size_x, const int size_y)
{
for (int y = 0; y < size_y; ++y)
{
for (int x = 0; x < size_x; ++x)
{
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
void getParams(int argc, char **argv, cudaDeviceProp &deviceProp, int &size_x, int &size_y, int max_tile_dim)
{
// set matrix size (if the (x,y) dims of the matrix are not square, then this will have to be modified)
if (checkCmdLineFlag(argc, (const char **)argv, "dimX"))
{
size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX");
if (size_x > max_tile_dim)
{
printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim);
}
else
{
printf("> MatrixSize X = %d\n", size_x);
}
}
else
{
size_x = max_tile_dim;
size_x = FLOOR(size_x, 512);
}
if (checkCmdLineFlag(argc, (const char **)argv, "dimY"))
{
size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY");
if (size_y > max_tile_dim)
{
printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim);
}
else
{
printf("> MatrixSize Y = %d\n", size_y);
}
}
else
{
size_y = max_tile_dim;
// If this is SM 1.2 or later hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1)
{
size_y = FLOOR(size_y, 512);
}
else // else for SM10,SM11 we round down to a multiple of 384
{
size_y = FLOOR(size_y, 384);
}
}
}
void
showHelp()
{
printf("\n%s : Command line options\n", sSDKsample);
printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n");
printf("> The default matrix size can be overridden with these parameters\n");
printf("\t-dimX=row_dim_size (matrix row dimensions)\n");
printf("\t-dimY=col_dim_size (matrix column dimensions)\n");
}
// ----
// main
// ----
int
main(int argc, char **argv)
{
// Start logs
printf("%s Starting...\n\n", sSDKsample);
if (checkCmdLineFlag(argc, (const char **)argv, "help"))
{
showHelp();
return 0;
}
int devID = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
// get number of SMs on this GPU
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// compute the scaling factor (for GPUs with fewer MPs)
float scale_factor, total_tiles;
scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
printf("> Device %d: \"%s\"\n", devID, deviceProp.name);
printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor);
// Calculate number of tiles we will run for the Matrix Transpose performance tests
int size_x, size_y, max_matrix_dim, matrix_size_test;
matrix_size_test = 512; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES / scale_factor;
max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test);
// This is the minimum size allowed
if (max_matrix_dim == 0)
{
max_matrix_dim = matrix_size_test;
}
printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf("> Compute performance scaling factor = %4.2f\n", scale_factor);
// Extract parameters if there are any; command line -dimX and -dimY can override
// any of these settings
getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim);
if (size_x != size_y)
{
printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0)
{
printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int);
const char *kernelName;
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
if (grid.x < 1 || grid.y < 1)
{
printf("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
// CUDA events
cudaEvent_t start, stop;
// size of memory required to store the matrix
size_t mem_size = static_cast<size_t>(sizeof(float) * size_x*size_y);
if (2*mem_size > deviceProp.totalGlobalMem)
{
printf("Input matrix size is larger than the available device memory!\n");
printf("Please choose a smaller size matrix\n");
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
// allocate host memory
float *h_idata = (float *) malloc(mem_size);
float *h_odata = (float *) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size));
checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size));
// initialize host data
for (int i = 0; i < (size_x*size_y); ++i)
{
h_idata[i] = (float) i;
}
// copy host data to device
checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n",
size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
// initialize events
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//
// loop over different kernels
//
bool success = true;
/* for (int k = 0; k<8; k++)
{
// set kernel pointer
switch (k)
{
case 0:
kernel = &copy;
kernelName = "simple copy ";
break;
case 1:
kernel = &copySharedMem;
kernelName = "shared memory copy";
break;
case 2:
kernel = &transposeNaive;
kernelName = "naive ";
break;
case 3:
kernel = &transposeCoalesced;
kernelName = "coalesced ";
break;
case 4:
kernel = &transposeNoBankConflicts;
kernelName = "optimized ";
break;
case 5:
kernel = &transposeCoarseGrained;
kernelName = "coarse-grained ";
break;
case 6:
kernel = &transposeFineGrained;
kernelName = "fine-grained ";
break;
case 7:
kernel = &transposeDiagonal;
kernelName = "diagonal ";
break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem)
{
gold = h_idata;
}
else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained)
{
gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check
}
else
{
gold = transposeGold;
} */
kernel = &transposeNoBankConflicts;
kernelName = "optimized ";
gold = transposeGold;
// Clear error status
checkCudaErrors(cudaGetLastError());
// Charu: warmup to avoid timing startup
// kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y);
// take measurements for loop over kernel launches
checkCudaErrors(cudaEventRecord(start, 0));
for (int i=0; i < NUM_REPS; i++)
{
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y);
// Ensure no launch failure
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
float kernelTime;
checkCudaErrors(cudaEventElapsedTime(&kernelTime, start, stop));
checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost));
bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false)
{
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// take measurements for loop inside kernel
checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost));
res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false)
{
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS);
printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
kernelBandwidth,
kernelTime/NUM_REPS,
(size_x *size_y), 1, TILE_DIM *BLOCK_ROWS);
// }
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
cudaFree(d_idata);
cudaFree(d_odata);
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
if (!success)
{
printf("Test failed!\n");
// exit(EXIT_FAILURE);
}
else
printf("Test passed\n");
// exit(EXIT_SUCCESS);
}
|
dadbed694b58ebaa1800a317a44a97d3b4a61f55.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <kernel/sparse.hpp>
#include <stdexcept>
#include <string>
#include <arith.hpp>
#include <cast.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <common/err_common.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
namespace cuda
{
using namespace common;
using namespace std;
//hipsparseStatus_t hipsparseZcsr2csc(hipsparseHandle_t handle,
// int m, int n, int nnz,
// const hipDoubleComplex *csrSortedVal,
// const int *csrSortedRowPtr, const int *csrSortedColInd,
// hipDoubleComplex *cscSortedVal,
// int *cscSortedRowInd, int *cscSortedColPtr,
// hipsparseAction_t copyValues,
// hipsparseIndexBase_t idxBase);
template<typename T>
struct csr2csc_func_def_t
{
typedef hipsparseStatus_t (*csr2csc_func_def)( hipsparseHandle_t,
int, int, int,
const T *, const int *, const int *,
T *, int *, int *,
hipsparseAction_t,
hipsparseIndexBase_t);
};
//hipsparseStatus_t hipsparseZdense2csr(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerRow,
// hipDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t
{
typedef hipsparseStatus_t (*dense2csr_func_def)( hipsparseHandle_t,
int, int,
const hipsparseMatDescr_t,
const T *, int,
const int *,
T *,
int *, int *);
};
//hipsparseStatus_t hipsparseZdense2csc(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerCol,
// hipDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t
{
typedef hipsparseStatus_t (*dense2csc_func_def)( hipsparseHandle_t,
int, int,
const hipsparseMatDescr_t,
const T *, int,
const int *,
T *,
int *, int *);
};
//hipsparseStatus_t hipsparseZcsr2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t
{
typedef hipsparseStatus_t (*csr2dense_func_def)( hipsparseHandle_t,
int, int,
const hipsparseMatDescr_t,
const T *,
const int *,
const int *,
T *, int);
};
//hipsparseStatus_t hipsparseZcsc2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t
{
typedef hipsparseStatus_t (*csc2dense_func_def)( hipsparseHandle_t,
int, int,
const hipsparseMatDescr_t,
const T *,
const int *,
const int *,
T *, int);
};
//hipsparseStatus_t hipsparseZnnz(hipsparseHandle_t handle,
// hipsparseDirection_t dirA,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t
{
typedef hipsparseStatus_t (*nnz_func_def)( hipsparseHandle_t,
hipsparseDirection_t,
int, int,
const hipsparseMatDescr_t,
const T *, int,
int *, int *);
};
//hipsparseStatus_t hipsparseZgthr(hipsparseHandle_t handle,
// int nnz,
// const hipDoubleComplex *y,
// hipDoubleComplex *xVal, const int *xInd,
// hipsparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t
{
typedef hipsparseStatus_t (*gthr_func_def)(hipsparseHandle_t,
int,
const T *,
T*, const int *,
hipsparseIndexBase_t);
};
#define SPARSE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SPARSE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \
FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusparse##PREFIX##FUNC; }
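// Example expansion: SPARSE_FUNC(csr2csc, float, S) defines csr2csc_func<float>() to
// return &cusparseScsr2csc cast to the csr2csc function-pointer type declared above.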
SPARSE_FUNC_DEF(csr2csc)
SPARSE_FUNC(csr2csc, float, S)
SPARSE_FUNC(csr2csc, double, D)
SPARSE_FUNC(csr2csc, cfloat, C)
SPARSE_FUNC(csr2csc, cdouble,Z)
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble,Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble,Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble,Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble,Z)
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble,Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble,Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in)
{
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
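// The dense array is column-major, so a linear index decomposes as
// idx = row + col * dims[0]; hence row = idx % dims[0] and col = idx / dims[0].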
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in)
{
const int M = in.dims()[0];
const int N = in.dims()[1];
// Create Sparse Matrix Descriptor
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int d = -1;
hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW;
if(stype == AF_STORAGE_CSR) {
d = M;
dir = HIPSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = HIPSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(
sparseHandle(),
dir,
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if(stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M+1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N+1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if(stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(),
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(),
values.get(), rowIdx.get(), colIdx.get()));
else
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(),
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(),
values.get(), rowIdx.get(), colIdx.get()));
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype);
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in)
{
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in)
{
// Create Sparse Matrix Descriptor
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if(stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(csr2dense_func<T>()(
sparseHandle(),
M, N,
descr,
in.getValues().get(),
in.getRowIdx().get(),
in.getColIdx().get(),
dense.get(), d_strides1));
else
CUSPARSE_CHECK(csc2dense_func<T>()(
sparseHandle(),
M, N,
descr,
in.getValues().get(),
in.getRowIdx().get(),
in.getColIdx().get(),
dense.get(), d_strides1));
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr));
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in)
{
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
if(src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(hipMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
hipMemcpyDeviceToDevice,
cuda::getActiveStream()));
// cusparse function to expand compressed row into coordinate
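// hipsparseXcsr2coo expands the (M+1)-entry compressed row pointer into nNZ explicit
// row indices; the values and column indices keep their CSR (row-major) ordering
// until the column sort below reorders them for COO.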
CUSPARSE_CHECK(hipsparseXcsr2coo(
sparseHandle(),
in.getRowIdx().get(),
nNZ, in.dims()[0],
converted.getRowIdx().get(),
HIPSPARSE_INDEX_BASE_ZERO));
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(
sparseHandle(),
in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>);
CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(hipsparseXcoosortByColumn(
sparseHandle(),
in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
P.get(), (void*)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ,
in.getValues().get(),
converted.getValues().get(),
P.get(), HIPSPARSE_INDEX_BASE_ZERO));
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse csr sort function is not behaving correctly.
// So the work around is to convert the COO into row major and then
// convert it to CSR
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(in.dims(), in.getValues(),
in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(
sparseHandle(),
cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>);
CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(hipsparseXcoosortByRow(
sparseHandle(),
cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
P.get(), (void*)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ,
in.getValues().get(),
cooT.getValues().get(),
P.get(), HIPSPARSE_INDEX_BASE_ZERO));
}
// Copy values and colIdx as is
CUDA_CHECK(hipMemcpyAsync(converted.getValues().get(), cooT.getValues().get(),
cooT.getValues().elements() * sizeof(T),
hipMemcpyDeviceToDevice,
cuda::getActiveStream()));
CUDA_CHECK(hipMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(),
cooT.getColIdx().elements() * sizeof(int),
hipMemcpyDeviceToDevice,
cuda::getActiveStream()));
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(hipsparseXcoo2csr(
sparseHandle(),
cooT.getRowIdx().get(),
nNZ, cooT.dims()[0],
converted.getRowIdx().get(),
HIPSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>(const SparseArray<T> &in); \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>(const SparseArray<T> &in); \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>(const SparseArray<T> &in); \
#define INSTANTIATE_COO_SPECIAL(T) \
template<> SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>(const Array<T> &in) \
{ return sparseConvertDenseToCOO<T>(in); } \
template<> Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>(const SparseArray<T> &in) \
{ return sparseConvertCOOToDense<T>(in); } \
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>(const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>(const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>(const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>(const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO) \
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
}
| dadbed694b58ebaa1800a317a44a97d3b4a61f55.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <kernel/sparse.hpp>
#include <stdexcept>
#include <string>
#include <arith.hpp>
#include <cast.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <common/err_common.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
namespace cuda
{
using namespace common;
using namespace std;
//cusparseStatus_t cusparseZcsr2csc(cusparseHandle_t handle,
// int m, int n, int nnz,
// const cuDoubleComplex *csrSortedVal,
// const int *csrSortedRowPtr, const int *csrSortedColInd,
// cuDoubleComplex *cscSortedVal,
// int *cscSortedRowInd, int *cscSortedColPtr,
// cusparseAction_t copyValues,
// cusparseIndexBase_t idxBase);
template<typename T>
struct csr2csc_func_def_t
{
typedef cusparseStatus_t (*csr2csc_func_def)( cusparseHandle_t,
int, int, int,
const T *, const int *, const int *,
T *, int *, int *,
cusparseAction_t,
cusparseIndexBase_t);
};
//cusparseStatus_t cusparseZdense2csr(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerRow,
// cuDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t
{
typedef cusparseStatus_t (*dense2csr_func_def)( cusparseHandle_t,
int, int,
const cusparseMatDescr_t,
const T *, int,
const int *,
T *,
int *, int *);
};
//cusparseStatus_t cusparseZdense2csc(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerCol,
// cuDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t
{
typedef cusparseStatus_t (*dense2csc_func_def)( cusparseHandle_t,
int, int,
const cusparseMatDescr_t,
const T *, int,
const int *,
T *,
int *, int *);
};
//cusparseStatus_t cusparseZcsr2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t
{
typedef cusparseStatus_t (*csr2dense_func_def)( cusparseHandle_t,
int, int,
const cusparseMatDescr_t,
const T *,
const int *,
const int *,
T *, int);
};
//cusparseStatus_t cusparseZcsc2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t
{
typedef cusparseStatus_t (*csc2dense_func_def)( cusparseHandle_t,
int, int,
const cusparseMatDescr_t,
const T *,
const int *,
const int *,
T *, int);
};
//cusparseStatus_t cusparseZnnz(cusparseHandle_t handle,
// cusparseDirection_t dirA,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t
{
typedef cusparseStatus_t (*nnz_func_def)( cusparseHandle_t,
cusparseDirection_t,
int, int,
const cusparseMatDescr_t,
const T *, int,
int *, int *);
};
//cusparseStatus_t cusparseZgthr(cusparseHandle_t handle,
// int nnz,
// const cuDoubleComplex *y,
// cuDoubleComplex *xVal, const int *xInd,
// cusparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t
{
typedef cusparseStatus_t (*gthr_func_def)(cusparseHandle_t,
int,
const T *,
T*, const int *,
cusparseIndexBase_t);
};
#define SPARSE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SPARSE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \
FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusparse##PREFIX##FUNC; }
SPARSE_FUNC_DEF(csr2csc)
SPARSE_FUNC(csr2csc, float, S)
SPARSE_FUNC(csr2csc, double, D)
SPARSE_FUNC(csr2csc, cfloat, C)
SPARSE_FUNC(csr2csc, cdouble,Z)
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble,Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble,Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble,Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble,Z)
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble,Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble,Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in)
{
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
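// The dense input is column-major with leading dimension M = in.dims()[0], so a
// flat nonzero index recovers its coordinates as row = idx % M, col = idx / M
// (e.g. with M = 3, flat index 7 lands at row 1, column 2).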
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in)
{
const int M = in.dims()[0];
const int N = in.dims()[1];
// Create Sparse Matrix Descriptor
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int d = -1;
cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW;
if(stype == AF_STORAGE_CSR) {
d = M;
dir = CUSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = CUSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(
sparseHandle(),
dir,
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if(stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M+1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N+1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if(stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(),
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(),
values.get(), rowIdx.get(), colIdx.get()));
else
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(),
M, N,
descr,
in.get(), in.strides()[1],
nnzPerDir.get(),
values.get(), rowIdx.get(), colIdx.get()));
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(cusparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype);
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in)
{
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in)
{
// Create Sparse Matrix Descriptor
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if(stype == AF_STORAGE_CSR)
CUSPARSE_CHECK(csr2dense_func<T>()(
sparseHandle(),
M, N,
descr,
in.getValues().get(),
in.getRowIdx().get(),
in.getColIdx().get(),
dense.get(), d_strides1));
else
CUSPARSE_CHECK(csc2dense_func<T>()(
sparseHandle(),
M, N,
descr,
in.getValues().get(),
in.getRowIdx().get(),
in.getColIdx().get(),
dense.get(), d_strides1));
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(cusparseDestroyMatDescr(descr));
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in)
{
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
if(src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(cudaMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
cudaMemcpyDeviceToDevice,
cuda::getActiveStream()));
// cusparse function to expand compressed row into coordinate
CUSPARSE_CHECK(cusparseXcsr2coo(
sparseHandle(),
in.getRowIdx().get(),
nNZ, in.dims()[0],
converted.getRowIdx().get(),
CUSPARSE_INDEX_BASE_ZERO));
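// cusparseXcsr2coo only expands the compressed row pointers into explicit row
// indices, so the entries are still ordered by row; the coosort-by-column call
// below reorders them by column, recording the applied permutation in P, which
// gthr then uses to gather the values into the matching order.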
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(
sparseHandle(),
in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>);
CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(cusparseXcoosortByColumn(
sparseHandle(),
in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
P.get(), (void*)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ,
in.getValues().get(),
converted.getValues().get(),
P.get(), CUSPARSE_INDEX_BASE_ZERO));
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse CSR sort function is not behaving correctly,
// so the workaround is to sort the COO input into row-major order
// and then convert it to CSR
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(in.dims(), in.getValues(),
in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(
sparseHandle(),
cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
&pBufferSizeInBytes));
shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>);
shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>);
CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(cusparseXcoosortByRow(
sparseHandle(),
cooT.dims()[0], cooT.dims()[1], nNZ,
cooT.getRowIdx().get(), cooT.getColIdx().get(),
P.get(), (void*)pBuffer.get()));
CUSPARSE_CHECK(gthr_func<T>()(
sparseHandle(), nNZ,
in.getValues().get(),
cooT.getValues().get(),
P.get(), CUSPARSE_INDEX_BASE_ZERO));
}
// Copy values and colIdx as is
CUDA_CHECK(cudaMemcpyAsync(converted.getValues().get(), cooT.getValues().get(),
cooT.getValues().elements() * sizeof(T),
cudaMemcpyDeviceToDevice,
cuda::getActiveStream()));
CUDA_CHECK(cudaMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(),
cooT.getColIdx().elements() * sizeof(int),
cudaMemcpyDeviceToDevice,
cuda::getActiveStream()));
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(cusparseXcoo2csr(
sparseHandle(),
cooT.getRowIdx().get(),
nNZ, cooT.dims()[0],
converted.getRowIdx().get(),
CUSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>(const SparseArray<T> &in); \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>(const SparseArray<T> &in); \
template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>(const SparseArray<T> &in);
#define INSTANTIATE_COO_SPECIAL(T) \
template<> SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>(const Array<T> &in) \
{ return sparseConvertDenseToCOO<T>(in); } \
template<> Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>(const SparseArray<T> &in) \
{ return sparseConvertCOOToDense<T>(in); }
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>(const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>(const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>(const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>(const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO)
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
}
|
ef7407947a45838a8608e81ece2b5c3e8093eaaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
c[index] = a[index] + b[index];
}
// main
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
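// N (2048*2048 = 4,194,304) is an exact multiple of THREADS_PER_BLOCK, so the
// N/THREADS_PER_BLOCK grid below covers every element and the kernel needs no
// bounds check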
int main(void)
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i;
// Allocate memory in Host
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate memory in Device
hipMalloc ((void **) &d_a, size);
hipMalloc ((void **) &d_b, size);
hipMalloc ((void **) &d_c, size);
// Initialize values (0 - 9)
for(i = 0;i < N; i++) {
a[i] = rand() % 10;
b[i] = rand() % 10;
}
// Copy data from Host to Device
hipMemcpy (d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy (d_b, b, size, hipMemcpyHostToDevice);
// Execute
hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);
// Copy result back to Host
// Take note that it will be smart enough to wait
// until the task on the device has completed
hipMemcpy (c, d_c, size, hipMemcpyDeviceToHost);
// Display the outcome
for(i=N-100;i<N;i++) {
printf("[%d]\t%2d + %2d = %2d\n", i, a[i], b[i], c[i]);
}
// Clean up at Host
free (a);
free (b);
free (c);
// Clean up at Device
hipFree (d_a);
hipFree (d_b);
hipFree (d_c);
return 0;
}
| ef7407947a45838a8608e81ece2b5c3e8093eaaa.cu | #include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
c[index] = a[index] + b[index];
}
// main
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
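// N (2048*2048 = 4,194,304) is an exact multiple of THREADS_PER_BLOCK, so the
// N/THREADS_PER_BLOCK grid below covers every element and the kernel needs no
// bounds check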
int main(void)
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i;
// Allocate memory in Host
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate memory in Device
cudaMalloc ((void **) &d_a, size);
cudaMalloc ((void **) &d_b, size);
cudaMalloc ((void **) &d_c, size);
// Initialize values (0 - 9)
for(i = 0;i < N; i++) {
a[i] = rand() % 10;
b[i] = rand() % 10;
}
// Copy data from Host to Device
cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy (d_b, b, size, cudaMemcpyHostToDevice);
// Execute
add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
// Copy result back to Host
// Take note that it will be smart enough to wait
// until the task on the device has completed
cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);
// Display the outcome
for(i=N-100;i<N;i++) {
printf("[%d]\t%2d + %2d = %2d\n", i, a[i], b[i], c[i]);
}
// Clean up at Host
free (a);
free (b);
free (c);
// Clean up at Device
cudaFree (d_a);
cudaFree (d_b);
cudaFree (d_c);
return 0;
}
|
2d26a395d33201edb4c6cfe673c617308a6906b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lusol.h"
__global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail);
__forceinline__ __device__
static int atomicDec0(int *address) {
volatile int *vaddr = address;
int old = *vaddr, assumed;
do {
assumed = old;
int newv = assumed > 0 ? assumed-1 : 0;
old = atomicCAS(address, assumed, newv);
} while (old != assumed);
return old;
}
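// Work-list topological traversal of the columns of L (TOPO_CSC_U below does the
// same for U): each warp claims one ready column from jlev -- count tracks how
// many published entries are still unclaimed and first is the next slot to hand
// out -- then spins until that entry is visible, and its lanes walk the strictly
// lower-triangular part of the column, atomically decrementing the dependency
// counts in dp; a column whose count drops to zero is appended to jlev at
// position last and the ready counter is bumped.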
__global__ void TOPO_CSC_L(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int *last, int *first, int *count) {
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
// local warp id
const int wlane = threadIdx.x / WARP;
int i, old_count, old_first;
volatile __shared__ int s_first[BLOCKDIM / WARP];
volatile __shared__ int s_count[BLOCKDIM / WARP];
volatile int *vjlev = jlev;
volatile int *vfirst = first;
do {
if (lane == 0) {
old_count = atomicDec0(count);
if (old_count > 0) {
old_first = atomicAdd(first, 1);
} else {
old_first = *vfirst;
}
s_first[wlane] = old_first;
s_count[wlane] = old_count;
}
old_first = s_first[wlane];
old_count = s_count[wlane];
if (old_count > 0) {
while ((i = vjlev[old_first]) == 0);
--i;
int q1 = db[i] + 1;
int q2 = ib[i+1];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(last, 1);
vjlev[p] = k + 1;
atomicAdd(count, 1);
}
}
}
} while (old_first < n);
}
__global__ void TOPO_CSC_U(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int *last, int *first, int *count) {
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
// local warp id
const int wlane = threadIdx.x / WARP;
int i, old_count, old_first;
volatile __shared__ int s_first[BLOCKDIM / WARP];
volatile __shared__ int s_count[BLOCKDIM / WARP];
volatile int *vjlev = jlev;
volatile int *vfirst = first;
do {
if (lane == 0) {
old_count = atomicDec0(count);
if (old_count > 0) {
old_first = atomicAdd(first, 1);
} else {
old_first = *vfirst;
}
s_first[wlane] = old_first;
s_count[wlane] = old_count;
}
old_first = s_first[wlane];
old_count = s_count[wlane];
if (old_count > 0) {
while ((i = vjlev[old_first]) == 0);
--i;
int q1 = ib[i];
int q2 = db[i];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(last, 1);
vjlev[p] = k + 1;
atomicAdd(count, 1);
}
}
}
} while (old_first < n);
}
void makeTopoCSC(int n, int *d_ib, int *d_jb, int *d_db,
int *d_dp, int *d_jlevL, int *d_jlevU) {
int gDim;
int *d_dpL = d_dp;
int *d_dpU = d_dp + n;
int *d_ptr, *d_first, *d_last, *d_count;
hipMalloc((void **)&d_ptr, 3*sizeof(int));
d_first = d_ptr;
d_last = d_ptr + 1;
d_count = d_ptr + 2;
int nthreads = 500 * WARP;
// L
hipMemset(d_ptr, 0, 3*sizeof(int));
hipMemset(d_jlevL, 0, n*sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpL, d_jlevL, d_count);
hipMemcpy(d_last, d_count, sizeof(int), hipMemcpyDeviceToDevice);
gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( TOPO_CSC_L), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpL, d_jlevL, d_last, d_first, d_count);
// U
hipMemset(d_ptr, 0, 3*sizeof(int));
hipMemset(d_jlevU, 0, n*sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpU, d_jlevU, d_count);
hipMemcpy(d_last, d_count, sizeof(int), hipMemcpyDeviceToDevice);
gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( TOPO_CSC_U), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpU, d_jlevU, d_last, d_first, d_count);
hipFree(d_ptr);
}
void checktopo(int n, int *ib, int *jb, int *db, int *d_jlevL,
int *d_jlevU, int *d_dp) {
int *jlevL = (int *) malloc(n*sizeof(int));
int *jlevU = (int *) malloc(n*sizeof(int));
int *dp = (int *) malloc(2*n*sizeof(int));
hipMemcpy(jlevL, d_jlevL, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(jlevU, d_jlevU, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(dp, d_dp, 2*n*sizeof(int), hipMemcpyDeviceToHost);
int *dpL = dp;
int *dpU = dp + n;
//for (int i = 0; i < n; i++) { printf("%d ", jlevL[i]); } printf("\n");
//for (int i = 0; i < n; i++) { printf("%d ", jlevU[i]); } printf("\n");
for (int i = 0; i < n; i++) {
int jl = jlevL[i];
int ju = jlevU[i];
if (jl < 1 || jl > n) {
printf("topo error: jl = %d\n", jl);
exit(0);
}
if (ju < 1 || ju > n) {
printf("topo error: ju = %d\n", ju);
exit(0);
}
if (dpL[jl-1] != 0) {
printf("topo error: dpL[%d] = %d\n", jl-1, dpL[jl-1]);
exit(0);
}
if (dpU[ju-1] != 0) {
printf("topo error: dpU[%d] = %d\n", ju-1, dpU[ju-1]);
exit(0);
}
for (int j = db[jl-1]+1; j < ib[jl]; j++) {
int k = jb[j-1]-1;
dpL[k]--;
}
for (int j = ib[ju-1]; j < db[ju-1]; j++) {
int k = jb[j-1]-1;
dpU[k]--;
}
}
free(jlevL);
free(jlevU);
free(dp);
}
| 2d26a395d33201edb4c6cfe673c617308a6906b0.cu | #include "lusol.h"
__global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail);
__forceinline__ __device__
static int atomicDec0(int *address) {
volatile int *vaddr = address;
int old = *vaddr, assumed;
do {
assumed = old;
int newv = assumed > 0 ? assumed-1 : 0;
old = atomicCAS(address, assumed, newv);
} while (old != assumed);
return old;
}
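// Work-list topological traversal of the columns of L (TOPO_CSC_U below does the
// same for U): each warp claims one ready column from jlev -- count tracks how
// many published entries are still unclaimed and first is the next slot to hand
// out -- then spins until that entry is visible, and its lanes walk the strictly
// lower-triangular part of the column, atomically decrementing the dependency
// counts in dp; a column whose count drops to zero is appended to jlev at
// position last and the ready counter is bumped.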
__global__ void TOPO_CSC_L(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int *last, int *first, int *count) {
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
// local warp id
const int wlane = threadIdx.x / WARP;
int i, old_count, old_first;
volatile __shared__ int s_first[BLOCKDIM / WARP];
volatile __shared__ int s_count[BLOCKDIM / WARP];
volatile int *vjlev = jlev;
volatile int *vfirst = first;
do {
if (lane == 0) {
old_count = atomicDec0(count);
if (old_count > 0) {
old_first = atomicAdd(first, 1);
} else {
old_first = *vfirst;
}
s_first[wlane] = old_first;
s_count[wlane] = old_count;
}
old_first = s_first[wlane];
old_count = s_count[wlane];
if (old_count > 0) {
while ((i = vjlev[old_first]) == 0);
--i;
int q1 = db[i] + 1;
int q2 = ib[i+1];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(last, 1);
vjlev[p] = k + 1;
atomicAdd(count, 1);
}
}
}
} while (old_first < n);
}
__global__ void TOPO_CSC_U(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int *last, int *first, int *count) {
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
// local warp id
const int wlane = threadIdx.x / WARP;
int i, old_count, old_first;
volatile __shared__ int s_first[BLOCKDIM / WARP];
volatile __shared__ int s_count[BLOCKDIM / WARP];
volatile int *vjlev = jlev;
volatile int *vfirst = first;
do {
if (lane == 0) {
old_count = atomicDec0(count);
if (old_count > 0) {
old_first = atomicAdd(first, 1);
} else {
old_first = *vfirst;
}
s_first[wlane] = old_first;
s_count[wlane] = old_count;
}
old_first = s_first[wlane];
old_count = s_count[wlane];
if (old_count > 0) {
while ((i = vjlev[old_first]) == 0);
--i;
int q1 = ib[i];
int q2 = db[i];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(last, 1);
vjlev[p] = k + 1;
atomicAdd(count, 1);
}
}
}
} while (old_first < n);
}
void makeTopoCSC(int n, int *d_ib, int *d_jb, int *d_db,
int *d_dp, int *d_jlevL, int *d_jlevU) {
int gDim;
int *d_dpL = d_dp;
int *d_dpU = d_dp + n;
int *d_ptr, *d_first, *d_last, *d_count;
cudaMalloc((void **)&d_ptr, 3*sizeof(int));
d_first = d_ptr;
d_last = d_ptr + 1;
d_count = d_ptr + 2;
int nthreads = 500 * WARP;
// L
cudaMemset(d_ptr, 0, 3*sizeof(int));
cudaMemset(d_jlevL, 0, n*sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpL, d_jlevL, d_count);
cudaMemcpy(d_last, d_count, sizeof(int), cudaMemcpyDeviceToDevice);
gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
TOPO_CSC_L<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpL, d_jlevL, d_last, d_first, d_count);
// U
cudaMemset(d_ptr, 0, 3*sizeof(int));
cudaMemset(d_jlevU, 0, n*sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpU, d_jlevU, d_count);
cudaMemcpy(d_last, d_count, sizeof(int), cudaMemcpyDeviceToDevice);
gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
TOPO_CSC_U<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpU, d_jlevU, d_last, d_first, d_count);
cudaFree(d_ptr);
}
void checktopo(int n, int *ib, int *jb, int *db, int *d_jlevL,
int *d_jlevU, int *d_dp) {
int *jlevL = (int *) malloc(n*sizeof(int));
int *jlevU = (int *) malloc(n*sizeof(int));
int *dp = (int *) malloc(2*n*sizeof(int));
cudaMemcpy(jlevL, d_jlevL, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(jlevU, d_jlevU, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(dp, d_dp, 2*n*sizeof(int), cudaMemcpyDeviceToHost);
int *dpL = dp;
int *dpU = dp + n;
//for (int i = 0; i < n; i++) { printf("%d ", jlevL[i]); } printf("\n");
//for (int i = 0; i < n; i++) { printf("%d ", jlevU[i]); } printf("\n");
for (int i = 0; i < n; i++) {
int jl = jlevL[i];
int ju = jlevU[i];
if (jl < 1 || jl > n) {
printf("topo error: jl = %d\n", jl);
exit(0);
}
if (ju < 1 || ju > n) {
printf("topo error: ju = %d\n", ju);
exit(0);
}
if (dpL[jl-1] != 0) {
printf("topo error: dpL[%d] = %d\n", jl-1, dpL[jl-1]);
exit(0);
}
if (dpU[ju-1] != 0) {
printf("topo error: dpU[%d] = %d\n", ju-1, dpU[ju-1]);
exit(0);
}
for (int j = db[jl-1]+1; j < ib[jl]; j++) {
int k = jb[j-1]-1;
dpL[k]--;
}
for (int j = ib[ju-1]; j < db[ju-1]; j++) {
int k = jb[j-1]-1;
dpU[k]--;
}
}
free(jlevL);
free(jlevU);
free(dp);
}
|
706d5f06afb646367a75c4bf53877f5c1862c4be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/concat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
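// Each thread handles one element of the current bottom blob: index splits into
// concat_num (the outer slice shared by all inputs) and concat_index (the offset
// within this blob's section of the concat axis); top_index re-inserts that
// section at offset_concat_axis inside the wider concatenated axis.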
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) {
top[0]->ShareData(*bottom[0]);
}
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
offset_concat_axis += bottom_concat_axis;
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) {
bottom[0]->ShareDiff(*top[0]);
}
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
| 706d5f06afb646367a75c4bf53877f5c1862c4be.cu | #include <vector>
#include "caffe/layers/concat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
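// Each thread handles one element of the current bottom blob: index splits into
// concat_num (the outer slice shared by all inputs) and concat_index (the offset
// within this blob's section of the concat axis); top_index re-inserts that
// section at offset_concat_axis inside the wider concatenated axis.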
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) {
top[0]->ShareData(*bottom[0]);
}
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
offset_concat_axis += bottom_concat_axis;
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) {
bottom[0]->ShareDiff(*top[0]);
}
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
|
0047615bbc3780a438fbb68fd8b56735b20f4c81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* initialization of the change flag
*/
namespace nscale { namespace gpu {
__global__ void init_change( bool *change ) {
*change = false;
}
}}
| 0047615bbc3780a438fbb68fd8b56735b20f4c81.cu | /*
* initialization of the change flag
*/
namespace nscale { namespace gpu {
__global__ void init_change( bool *change ) {
*change = false;
}
}}
|
20276a82972d0831715d04572edb4fbad34db2b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <iostream>
#include <vector>
#include <chrono>
#include "parameters.h"
/*
YTZ Notes:
This kernel implements the ANI-1 featurization scheme. It can process about 3.6 million samples/minute
*/
inline __device__ float dist_diff(float dx, float dy, float dz) {
return sqrt(dx*dx+dy*dy+dz*dz);
}
inline __device__ float f_C(float r_ij, float r_c) {
if (r_ij <= r_c) {
return 0.5 * cosf((M_PI * r_ij) / r_c) + 0.5;
} else {
return 0;
}
}
// Linearizes the diagonal-inclusive upper right
// triangle of the symmetric ranks 0 and 1 of a rank-4 tensor
// into a linear index
inline __device__ int linearize(int i, int j, int k, int l) {
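// Worked example (assuming MAX_ATOM_TYPES == 4, matching the H/C/N/O routing in
// featurize below): the unordered type pair (i, j) = (1, 2) with k = l = 0 maps
// to basis = 4*3/2 - 3*2/2 + 2 = 5, i.e. slot 5 * NUM_A_THETAS * NUM_A_RS;
// swapping i and j gives the same slot.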
if(j < i) {
int tmp = i;
i = j;
j = tmp;
}
const auto N = MAX_ATOM_TYPES;
const auto K = NUM_A_THETAS;
const auto L = NUM_A_RS;
int basis = (N*(N-1)/2 - (N-i) * (N-i-1)/2 +j);
return basis*K*L + k*L + l;
}
__global__ void inverse(
const int *sort_idxs,
int *gather_idxs,
size_t n_elems) {
int elem_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(elem_idx < n_elems) {
gather_idxs[sort_idxs[elem_idx]] = elem_idx;
}
}
__global__ void scatter(
const int *sorted_global_idxs,
const int *sorted_local_idxs,
int *scatter_idxs,
size_t n_elems) {
int elem_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(elem_idx < n_elems) {
scatter_idxs[sorted_global_idxs[elem_idx]] = sorted_local_idxs[elem_idx];
}
}
// Remind yutong to document what these pointers are.
__global__ void featurize(
const float *Xs,
const float *Ys,
const float *Zs,
const int *atomic_nums,
const int *mol_offsets,
const int *mol_atom_count,
const int num_mols, // actually equal to blockDim.x
const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
float *X_feat_out_H,
float *X_feat_out_C,
float *X_feat_out_N,
float *X_feat_out_O) {
int mol_idx = blockIdx.x;
int num_atoms = mol_atom_count[blockIdx.x];
int block_size = blockDim.x;
int num_warps = (num_atoms + block_size - 1)/block_size; // how many warps we need to process
for(int warp_idx = 0; warp_idx < num_warps; warp_idx++) {
int local_atom_idx = warp_idx*block_size + threadIdx.x; // local_local_atom_idx
if (local_atom_idx >= num_atoms) {
return;
}
// todo: cache into shared mem
// load all the x y z coordinates
int g_atom_idx_i = mol_offsets[mol_idx]+local_atom_idx;
int g_atomic_num_i = atomic_nums[g_atom_idx_i];
float i_x = Xs[g_atom_idx_i];
float i_y = Ys[g_atom_idx_i];
float i_z = Zs[g_atom_idx_i];
// printf("%d %d %d (%f, %f, %f)\n", mol_idx, local_atom_idx, num_atoms, i_x, i_y, i_z);
// float *X_feat_i = X_feat_out + scatter_idxs[g_atom_idx_i];
// if(Atom)
float *X_feat_out_i;
if(g_atomic_num_i == 0) {
X_feat_out_i = X_feat_out_H;
} else if(g_atomic_num_i == 1) {
X_feat_out_i = X_feat_out_C;
} else if(g_atomic_num_i == 2) {
X_feat_out_i = X_feat_out_N;
} else {
X_feat_out_i = X_feat_out_O;
}
float *radial_feature_buffer_i = X_feat_out_i + scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + 0;
float *angular_feature_buffer_i = X_feat_out_i + scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + RADIAL_FEATURE_SIZE;
for(int j=0; j < num_atoms; j++) {
int g_atom_idx_j = mol_offsets[mol_idx]+j;
int g_atomic_num_j = atomic_nums[g_atom_idx_j];
float j_x = Xs[g_atom_idx_j];
float j_y = Ys[g_atom_idx_j];
float j_z = Zs[g_atom_idx_j];
float d_ij_x = i_x - j_x;
float d_ij_y = i_y - j_y;
float d_ij_z = i_z - j_z;
// printf("(%f, %f, %f)\n", d_ij_x, d_ij_y, d_ij_z);
float r_ij = dist_diff(d_ij_x, d_ij_y, d_ij_z);
// float *X_feat_j = X_feat_out + scatter_idxs[g_atom_idx_j];
float *X_feat_out_j;
if(g_atomic_num_j == 0) {
X_feat_out_j = X_feat_out_H;
} else if(g_atomic_num_j == 1) {
X_feat_out_j = X_feat_out_C;
} else if(g_atomic_num_j == 2) {
X_feat_out_j = X_feat_out_N;
} else {
X_feat_out_j = X_feat_out_O;
}
float *radial_feature_buffer_j = X_feat_out_j + scatter_idxs[g_atom_idx_j]*TOTAL_FEATURE_SIZE + 0;
// float *radial_feature_buffer_j = X_feat_j + g_atom_idx_j*TOTAL_FEATURE_SIZE + 0;
// float *angular_feature_buffer_j = X_feat_out + g_atom_idx_j*TOTAL_FEATURE_SIZE + RADIAL_FEATURE_SIZE;
// if(g_atom_idx_i == 0) {
// printf("gpu j %d %f\n", j, r_ij);
// printf("summand, offset, %f, %d\n", summand, scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx);
// printf("summand, offset, %f, %d\n", summand, scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx);
// }
// radial features
if(r_ij < R_Rc && local_atom_idx < j) {
for(int r_idx = 0; r_idx < NUM_R_Rs; r_idx++) {
float summand = expf(-R_eta * powf(r_ij - R_Rs[r_idx], 2.0)) * f_C(r_ij, R_Rc);
// exploit symmetry of the atomic adds
auto res1 = atomicAdd(radial_feature_buffer_i + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx, summand);
auto res2 = atomicAdd(radial_feature_buffer_j + atomic_nums[g_atom_idx_i] * NUM_R_Rs + r_idx, summand);
//if(isnan(res1) || isinf(res1)) {
// printf("WTF RADIAL RES1 NAN/INF, offset, %f, %f\n", res1, summand);
// : %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
//}
//if(isnan(res2) || isinf(res2)) {
// printf("WTF RADIAL RES2 NAN/INF, offset, %f, %f\n", res2, summand);
// : %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
//}
}
}
float A_f_C_ij = f_C(r_ij, A_Rc);
if(r_ij < A_Rc) {
for(size_t k=j+1; k < num_atoms; k++) {
if(local_atom_idx == j || local_atom_idx == k || j == k) {
continue;
}
// const int an_i = atomic_nums[local_atom_idx];
int g_atom_idx_k = mol_offsets[mol_idx]+k;
const int an_j = atomic_nums[g_atom_idx_j];
const int an_k = atomic_nums[g_atom_idx_k];
float k_x = Xs[g_atom_idx_k];
float k_y = Ys[g_atom_idx_k];
float k_z = Zs[g_atom_idx_k];
float d_ik_x = i_x - k_x;
float d_ik_y = i_y - k_y;
float d_ik_z = i_z - k_z;
float r_ik = dist_diff(d_ik_x, d_ik_y, d_ik_z);
if(r_ik < A_Rc) {
// TODO(YTZ): replace with arctan2 trick
float inner = (d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z) / (r_ij * r_ik);
inner = fmaxf(inner, -1.0);
inner = fminf(inner, 1.0);
// printf("INNER %f\n", inner);
float theta_ijk = acosf(inner);
// super useful debug
if(isnan(theta_ijk) || isinf(theta_ijk)) {
printf("WTF NAN/INF: %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
}
// printf("gpu tijk %d %d %d %f\n", local_atom_idx, j, k, theta_ijk);
float A_f_C_ik = f_C(r_ik, A_Rc);
for(int t=0; t < NUM_A_THETAS; t++) {
for(int s=0; s < NUM_A_RS; s++) {
// (TODO: ytz) do 2*(1-A_Zeta) at the end
float summand = powf(2, 1-A_zeta) * powf(1+cosf(theta_ijk - A_thetas[t]), A_zeta) * expf(-A_eta*powf((r_ij + r_ik)/2 - A_Rs[s], 2)) * A_f_C_ij * A_f_C_ik;
// printf("summand: %f, \n", summand);
// printf("scatter_idxs[g_atom_idx_i]: %d, linearize: %d\n", scatter_idxs[g_atom_idx_i], linearize(an_j, an_k, t, s));
// printf("i,j,k,t,s %d %d %d %d %d %d\n", g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, at_idx, ar_idx, linearize(j_type, k_type, at_idx, ar_idx))
auto res = atomicAdd(angular_feature_buffer_i + linearize(an_j, an_k, t, s), summand);
// if(isnan(res) || isinf(res)) {
// printf("WTF ANGULAR SUMMAND NAN/INF: %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
// }
}
}
}
}
} // end radial
} // end current warp
} // end all warps
}
template<typename T>
T *cudaMallocSimple(size_t n) {
T *d_obj;
std::cout << "mallocing:" << n*sizeof(T) << "bytes\n";
assert(hipMalloc(&d_obj, n*sizeof(T)) == 0);
return d_obj;
}
template<typename T>
void cudaCopySimple(T *obj, size_t n, T *d_obj) {
assert(hipMemcpy(d_obj, obj, n*sizeof(T), hipMemcpyHostToDevice) == 0);
}
typedef std::chrono::high_resolution_clock Clock;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// int main(void) {
// auto X_obj = cnpy::npy_load("Xs.npy");
// auto Y_obj = cnpy::npy_load("Ys.npy");
// auto Z_obj = cnpy::npy_load("Zs.npy");
// auto A_obj = cnpy::npy_load("As.npy");
// auto MOs = cnpy::npy_load("MOs.npy");
// auto MACs = cnpy::npy_load("MACs.npy");
// float *d_Xs = cudaMallocSimple<float>(X_obj.shape[0]);
// float *d_Ys = cudaMallocSimple<float>(Y_obj.shape[0]);
// float *d_Zs = cudaMallocSimple<float>(Z_obj.shape[0]);
// int *d_As = cudaMallocSimple<int>(A_obj.shape[0]);
// int *d_MOs = cudaMallocSimple<int>(MOs.shape[0]);
// int *d_MACs = cudaMallocSimple<int>(MACs.shape[0]); // max
// size_t n_total_atoms = X_obj.shape[0];
// size_t n_mols = MOs.shape[0];
// int sort_num_items = n_total_atoms; // change to upperbound later, max number of atoms per block
// int *d_vals_in = cudaMallocSimple<int>(sort_num_items);
// int *sort_idxs = cudaMallocSimple<int>(sort_num_items);
// int *inv_idxs = cudaMallocSimple<int>(sort_num_items);
// std::vector<int> idxs(sort_num_items);
// for(size_t i=0; i < sort_num_items; i++) {
// idxs[i] = i;
// }
// cudaCopySimple(&idxs[0], sort_num_items, d_vals_in);
// int *d_keys_out = cudaMallocSimple<int>(sort_num_items);
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// // determine size requirements
// gpuErrchk(hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_As, d_keys_out, d_vals_in, sort_idxs, sort_num_items));
// gpuErrchk(hipMalloc(&d_temp_storage, temp_storage_bytes)) ;
// // SETUP DONE
// float* d_X_feat;
// printf("Total number of atoms: %d \n", n_total_atoms);
// std::cout << "mallocing:" << n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float) << "bytes\n";
// hipMalloc(&d_X_feat, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float));
// auto start = Clock::now();
// // int i=0;
// for(size_t i=0; i < 100000; i++) {
// int num_items = n_total_atoms; // upper bound this to a fixed num
// std::cout << i << std::endl;
// cudaCopySimple(X_obj.data<float>(), X_obj.shape[0], d_Xs);
// cudaCopySimple(Y_obj.data<float>(), Y_obj.shape[0], d_Ys);
// cudaCopySimple(Z_obj.data<float>(), Z_obj.shape[0], d_Zs);
// cudaCopySimple(A_obj.data<int>(), A_obj.shape[0], d_As);
// cudaCopySimple(MOs.data<int>(), MOs.shape[0], d_MOs);
// cudaCopySimple(MACs.data<int>(), MACs.shape[0], d_MACs); // max
// assert(hipMemset(d_X_feat, 0, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float)) == 0);
// // atom type counters
// std::vector<int> counts(MAX_ATOM_TYPES, 0);
// for(size_t j=0; j < n_total_atoms; j++) {
// counts[A_obj.data<int>()[j]] += 1;
// }
// // 1. Sort by atom pairs.
// // 2.
// // GPU
// gpuErrchk(hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_As, d_keys_out, d_vals_in, sort_idxs, sort_num_items));
// inverse<<<n_mols, 32>>>(sort_idxs, inv_idxs, sort_num_items); // invert
// gpuErrchk(hipPeekAtLastError());
// // follow up with a segment reduce
// // std::vector<int> test(sort_num_items);
// // hipMemcpy(&test[0], inv_idxs, n_total_atoms*sizeof(int), hipMemcpyDeviceToHost);
// // for(auto v : test) {
// // std::cout << v << " ";
// // }
// // return;
// // CPU
// // std::vector<int> buffer(A_obj.data<int>(), A_obj.data<int>() + A_obj.shape[0]);
// // std::vector<int> h_sort_idx = sort_indexes(buffer);
// // for(size_t k=0; k < sort_num_items; k++) {
// // buffer[h_sort_idx[k]] = k;
// // }
// // cudaCopySimple(&buffer[0], sort_num_items, inv_idxs);
// //START
// featurize<<<n_mols, 32>>>(
// d_Xs,
// d_Ys,
// d_Zs,
// d_As,
// d_MOs,
// d_MACs,
// n_mols,
// inv_idxs,
// d_X_feat);
// gpuErrchk( hipPeekAtLastError() );
// auto end = Clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// std::cout << "DURATION:" << duration << std::endl;
// std::cout << "samples per minute:" << (float((i+1)*n_mols) / duration) * 60 * 1e9 << std::endl;
// }
// std::vector<float> X_feat(n_total_atoms*TOTAL_FEATURE_SIZE, 0);
// hipMemcpy(&X_feat[0], d_X_feat, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(int), hipMemcpyDeviceToHost);
// }
| 20276a82972d0831715d04572edb4fbad34db2b1.cu | #include <algorithm>
#include <iostream>
#include <vector>
#include <chrono>
#include "parameters.h"
/*
YTZ Notes:
This kernel implements the ANI-1 featurization scheme. It can process about 3.6 million samples/minute
*/
inline __device__ float dist_diff(float dx, float dy, float dz) {
return sqrt(dx*dx+dy*dy+dz*dz);
}
inline __device__ float f_C(float r_ij, float r_c) {
if (r_ij <= r_c) {
return 0.5 * cosf((M_PI * r_ij) / r_c) + 0.5;
} else {
return 0;
}
}
// Linearizes the diagonal-inclusive upper right
// triangle of the symmetric ranks 0 and 1 of a rank-4 tensor
// into a linear index
inline __device__ int linearize(int i, int j, int k, int l) {
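// Worked example (assuming MAX_ATOM_TYPES == 4, matching the H/C/N/O routing in
// featurize below): the unordered type pair (i, j) = (1, 2) with k = l = 0 maps
// to basis = 4*3/2 - 3*2/2 + 2 = 5, i.e. slot 5 * NUM_A_THETAS * NUM_A_RS;
// swapping i and j gives the same slot.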
if(j < i) {
int tmp = i;
i = j;
j = tmp;
}
const auto N = MAX_ATOM_TYPES;
const auto K = NUM_A_THETAS;
const auto L = NUM_A_RS;
int basis = (N*(N-1)/2 - (N-i) * (N-i-1)/2 +j);
return basis*K*L + k*L + l;
}
__global__ void inverse(
const int *sort_idxs,
int *gather_idxs,
size_t n_elems) {
int elem_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(elem_idx < n_elems) {
gather_idxs[sort_idxs[elem_idx]] = elem_idx;
}
}
__global__ void scatter(
const int *sorted_global_idxs,
const int *sorted_local_idxs,
int *scatter_idxs,
size_t n_elems) {
int elem_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(elem_idx < n_elems) {
scatter_idxs[sorted_global_idxs[elem_idx]] = sorted_local_idxs[elem_idx];
}
}
// Remind yutong to document what these pointers are.
__global__ void featurize(
const float *Xs,
const float *Ys,
const float *Zs,
const int *atomic_nums,
const int *mol_offsets,
const int *mol_atom_count,
const int num_mols, // actually equal to blockDim.x
const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
float *X_feat_out_H,
float *X_feat_out_C,
float *X_feat_out_N,
float *X_feat_out_O) {
int mol_idx = blockIdx.x;
int num_atoms = mol_atom_count[blockIdx.x];
int block_size = blockDim.x;
int num_warps = (num_atoms + block_size - 1)/block_size; // how many warps we need to process
for(int warp_idx = 0; warp_idx < num_warps; warp_idx++) {
int local_atom_idx = warp_idx*block_size + threadIdx.x; // local_local_atom_idx
if (local_atom_idx >= num_atoms) {
return;
}
// todo: cache into shared mem
// load all the x y z coordinates
int g_atom_idx_i = mol_offsets[mol_idx]+local_atom_idx;
int g_atomic_num_i = atomic_nums[g_atom_idx_i];
float i_x = Xs[g_atom_idx_i];
float i_y = Ys[g_atom_idx_i];
float i_z = Zs[g_atom_idx_i];
// printf("%d %d %d (%f, %f, %f)\n", mol_idx, local_atom_idx, num_atoms, i_x, i_y, i_z);
// float *X_feat_i = X_feat_out + scatter_idxs[g_atom_idx_i];
// if(Atom)
float *X_feat_out_i;
if(g_atomic_num_i == 0) {
X_feat_out_i = X_feat_out_H;
} else if(g_atomic_num_i == 1) {
X_feat_out_i = X_feat_out_C;
} else if(g_atomic_num_i == 2) {
X_feat_out_i = X_feat_out_N;
} else {
X_feat_out_i = X_feat_out_O;
}
float *radial_feature_buffer_i = X_feat_out_i + scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + 0;
float *angular_feature_buffer_i = X_feat_out_i + scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + RADIAL_FEATURE_SIZE;
for(int j=0; j < num_atoms; j++) {
int g_atom_idx_j = mol_offsets[mol_idx]+j;
int g_atomic_num_j = atomic_nums[g_atom_idx_j];
float j_x = Xs[g_atom_idx_j];
float j_y = Ys[g_atom_idx_j];
float j_z = Zs[g_atom_idx_j];
float d_ij_x = i_x - j_x;
float d_ij_y = i_y - j_y;
float d_ij_z = i_z - j_z;
// printf("(%f, %f, %f)\n", d_ij_x, d_ij_y, d_ij_z);
float r_ij = dist_diff(d_ij_x, d_ij_y, d_ij_z);
// float *X_feat_j = X_feat_out + scatter_idxs[g_atom_idx_j];
float *X_feat_out_j;
if(g_atomic_num_j == 0) {
X_feat_out_j = X_feat_out_H;
} else if(g_atomic_num_j == 1) {
X_feat_out_j = X_feat_out_C;
} else if(g_atomic_num_j == 2) {
X_feat_out_j = X_feat_out_N;
} else {
X_feat_out_j = X_feat_out_O;
}
float *radial_feature_buffer_j = X_feat_out_j + scatter_idxs[g_atom_idx_j]*TOTAL_FEATURE_SIZE + 0;
// float *radial_feature_buffer_j = X_feat_j + g_atom_idx_j*TOTAL_FEATURE_SIZE + 0;
// float *angular_feature_buffer_j = X_feat_out + g_atom_idx_j*TOTAL_FEATURE_SIZE + RADIAL_FEATURE_SIZE;
// if(g_atom_idx_i == 0) {
// printf("gpu j %d %f\n", j, r_ij);
// printf("summand, offset, %f, %d\n", summand, scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx);
// printf("summand, offset, %f, %d\n", summand, scatter_idxs[g_atom_idx_i]*TOTAL_FEATURE_SIZE + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx);
// }
// radial features
if(r_ij < R_Rc && local_atom_idx < j) {
for(int r_idx = 0; r_idx < NUM_R_Rs; r_idx++) {
float summand = expf(-R_eta * powf(r_ij - R_Rs[r_idx], 2.0)) * f_C(r_ij, R_Rc);
// exploit symmetry of the atomic adds
auto res1 = atomicAdd(radial_feature_buffer_i + atomic_nums[g_atom_idx_j] * NUM_R_Rs + r_idx, summand);
auto res2 = atomicAdd(radial_feature_buffer_j + atomic_nums[g_atom_idx_i] * NUM_R_Rs + r_idx, summand);
//if(isnan(res1) || isinf(res1)) {
// printf("WTF RADIAL RES1 NAN/INF, offset, %f, %f\n", res1, summand);
// : %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
//}
//if(isnan(res2) || isinf(res2)) {
// printf("WTF RADIAL RES2 NAN/INF, offset, %f, %f\n", res2, summand);
// : %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
//}
}
}
float A_f_C_ij = f_C(r_ij, A_Rc);
if(r_ij < A_Rc) {
for(size_t k=j+1; k < num_atoms; k++) {
if(local_atom_idx == j || local_atom_idx == k || j == k) {
continue;
}
// const int an_i = atomic_nums[local_atom_idx];
int g_atom_idx_k = mol_offsets[mol_idx]+k;
const int an_j = atomic_nums[g_atom_idx_j];
const int an_k = atomic_nums[g_atom_idx_k];
float k_x = Xs[g_atom_idx_k];
float k_y = Ys[g_atom_idx_k];
float k_z = Zs[g_atom_idx_k];
float d_ik_x = i_x - k_x;
float d_ik_y = i_y - k_y;
float d_ik_z = i_z - k_z;
float r_ik = dist_diff(d_ik_x, d_ik_y, d_ik_z);
if(r_ik < A_Rc) {
// TODO(YTZ): replace with arctan2 trick
float inner = (d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z) / (r_ij * r_ik);
inner = fmaxf(inner, -1.0);
inner = fminf(inner, 1.0);
// printf("INNER %f\n", inner);
float theta_ijk = acosf(inner);
// super useful debug
if(isnan(theta_ijk) || isinf(theta_ijk)) {
printf("WTF NAN/INF: %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
}
// printf("gpu tijk %d %d %d %f\n", local_atom_idx, j, k, theta_ijk);
float A_f_C_ik = f_C(r_ik, A_Rc);
for(int t=0; t < NUM_A_THETAS; t++) {
for(int s=0; s < NUM_A_RS; s++) {
// (TODO: ytz) do 2*(1-A_Zeta) at the end
float summand = powf(2, 1-A_zeta) * powf(1+cosf(theta_ijk - A_thetas[t]), A_zeta) * expf(-A_eta*powf((r_ij + r_ik)/2 - A_Rs[s], 2)) * A_f_C_ij * A_f_C_ik;
// printf("summand: %f, \n", summand);
// printf("scatter_idxs[g_atom_idx_i]: %d, linearize: %d\n", scatter_idxs[g_atom_idx_i], linearize(an_j, an_k, t, s));
// printf("i,j,k,t,s %d %d %d %d %d %d\n", g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, at_idx, ar_idx, linearize(j_type, k_type, at_idx, ar_idx))
auto res = atomicAdd(angular_feature_buffer_i + linearize(an_j, an_k, t, s), summand);
// if(isnan(res) || isinf(res)) {
// printf("WTF ANGULAR SUMMAND NAN/INF: %d, %d, %d, r_ij, r_ik, %f, %f, top %f, bottom %f, i_coords:(%f, %f, %f), j_coords(%f, %f, %f), k_coords(%f, %f, %f)\n",
// g_atom_idx_i, g_atom_idx_j, g_atom_idx_k, r_ij, r_ik, d_ij_x*d_ik_x + d_ij_y*d_ik_y + d_ij_z*d_ik_z, r_ij * r_ik, i_x, i_y, i_z, j_x, j_y, j_z, k_x, k_y, k_z);
// }
}
}
}
}
} // end radial
} // end current warp
} // end all warps
}
template<typename T>
T *cudaMallocSimple(size_t n) {
T *d_obj;
std::cout << "mallocing:" << n*sizeof(T) << "bytes\n";
assert(cudaMalloc(&d_obj, n*sizeof(T)) == 0);
return d_obj;
}
template<typename T>
void cudaCopySimple(T *obj, size_t n, T *d_obj) {
assert(cudaMemcpy(d_obj, obj, n*sizeof(T), cudaMemcpyHostToDevice) == 0);
}
typedef std::chrono::high_resolution_clock Clock;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// int main(void) {
// auto X_obj = cnpy::npy_load("Xs.npy");
// auto Y_obj = cnpy::npy_load("Ys.npy");
// auto Z_obj = cnpy::npy_load("Zs.npy");
// auto A_obj = cnpy::npy_load("As.npy");
// auto MOs = cnpy::npy_load("MOs.npy");
// auto MACs = cnpy::npy_load("MACs.npy");
// float *d_Xs = cudaMallocSimple<float>(X_obj.shape[0]);
// float *d_Ys = cudaMallocSimple<float>(Y_obj.shape[0]);
// float *d_Zs = cudaMallocSimple<float>(Z_obj.shape[0]);
// int *d_As = cudaMallocSimple<int>(A_obj.shape[0]);
// int *d_MOs = cudaMallocSimple<int>(MOs.shape[0]);
// int *d_MACs = cudaMallocSimple<int>(MACs.shape[0]); // max
// size_t n_total_atoms = X_obj.shape[0];
// size_t n_mols = MOs.shape[0];
// int sort_num_items = n_total_atoms; // change to upperbound later, max number of atoms per block
// int *d_vals_in = cudaMallocSimple<int>(sort_num_items);
// int *sort_idxs = cudaMallocSimple<int>(sort_num_items);
// int *inv_idxs = cudaMallocSimple<int>(sort_num_items);
// std::vector<int> idxs(sort_num_items);
// for(size_t i=0; i < sort_num_items; i++) {
// idxs[i] = i;
// }
// cudaCopySimple(&idxs[0], sort_num_items, d_vals_in);
// int *d_keys_out = cudaMallocSimple<int>(sort_num_items);
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// // determine size requirements
// gpuErrchk(cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_As, d_keys_out, d_vals_in, sort_idxs, sort_num_items));
// gpuErrchk(cudaMalloc(&d_temp_storage, temp_storage_bytes)) ;
// // SETUP DONE
// float* d_X_feat;
// printf("Total number of atoms: %d \n", n_total_atoms);
// std::cout << "mallocing:" << n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float) << "bytes\n";
// cudaMalloc(&d_X_feat, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float));
// auto start = Clock::now();
// // int i=0;
// for(size_t i=0; i < 100000; i++) {
// int num_items = n_total_atoms; // upper bound this to a fixed num
// std::cout << i << std::endl;
// cudaCopySimple(X_obj.data<float>(), X_obj.shape[0], d_Xs);
// cudaCopySimple(Y_obj.data<float>(), Y_obj.shape[0], d_Ys);
// cudaCopySimple(Z_obj.data<float>(), Z_obj.shape[0], d_Zs);
// cudaCopySimple(A_obj.data<int>(), A_obj.shape[0], d_As);
// cudaCopySimple(MOs.data<int>(), MOs.shape[0], d_MOs);
// cudaCopySimple(MACs.data<int>(), MACs.shape[0], d_MACs); // max
// assert(cudaMemset(d_X_feat, 0, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(float)) == 0);
// // atom type counters
// std::vector<int> counts(MAX_ATOM_TYPES, 0);
// for(size_t j=0; j < n_total_atoms; j++) {
// counts[A_obj.data<int>()[j]] += 1;
// }
// // 1. Sort by atom pairs.
// // 2.
// // GPU
// gpuErrchk(cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_As, d_keys_out, d_vals_in, sort_idxs, sort_num_items));
// inverse<<<n_mols, 32>>>(sort_idxs, inv_idxs, sort_num_items); // invert
// gpuErrchk(cudaPeekAtLastError());
// // follow up with a segment reduce
// // std::vector<int> test(sort_num_items);
// // cudaMemcpy(&test[0], inv_idxs, n_total_atoms*sizeof(int), cudaMemcpyDeviceToHost);
// // for(auto v : test) {
// // std::cout << v << " ";
// // }
// // return;
// // CPU
// // std::vector<int> buffer(A_obj.data<int>(), A_obj.data<int>() + A_obj.shape[0]);
// // std::vector<int> h_sort_idx = sort_indexes(buffer);
// // for(size_t k=0; k < sort_num_items; k++) {
// // buffer[h_sort_idx[k]] = k;
// // }
// // cudaCopySimple(&buffer[0], sort_num_items, inv_idxs);
// //START
// featurize<<<n_mols, 32>>>(
// d_Xs,
// d_Ys,
// d_Zs,
// d_As,
// d_MOs,
// d_MACs,
// n_mols,
// inv_idxs,
// d_X_feat);
// gpuErrchk( cudaPeekAtLastError() );
// auto end = Clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// std::cout << "DURATION:" << duration << std::endl;
// std::cout << "samples per minute:" << (float((i+1)*n_mols) / duration) * 60 * 1e9 << std::endl;
// }
// std::vector<float> X_feat(n_total_atoms*TOTAL_FEATURE_SIZE, 0);
// cudaMemcpy(&X_feat[0], d_X_feat, n_total_atoms*TOTAL_FEATURE_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
// }
|
df65fa01bd31dc336f23c0bced984c6ea2701cec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"Raytracer.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
__device__ LPDWORD gPixels;
__device__ Sphere* deviceScene;
__device__ Camera* deviceCamera;
__device__ hiprandState_t* deviceRandState;
const int sampleCount = 1;
std::vector<Sphere> hostScene;
Camera hostCamera;
__global__ void cudaCopyPixels(LPDWORD cpuPixels, LPDWORD gpuPixels, unsigned int size)
{
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < size; i++)
{
cpuPixels[i] = gpuPixels[i];
}
}
__syncthreads();
}
__global__ void cudaInitDeviceMemory()
{
printf("cudaInitDeviceMemory\n");
printf("\t - Malloc device scene memory.\n");
deviceScene = (Sphere*)(malloc(sizeof(Sphere) * 2));
printf("\t\t Malloc result : %p\n", &deviceScene[0]);
printf("\t\t Malloc result : %p\n", &deviceScene[1]);
printf("\t\t %d thread acquire %p \n", threadIdx.x, &deviceScene[0]);
printf("\t\t %d thread acquire %p \n", threadIdx.x, &deviceScene[1]);
}
__global__ void cudaCopyScene(Sphere* hostScene, unsigned int count)
{
printf("copy scene (gpu)\n");
printf("\tdevice object - %p\n", &deviceScene[0]);
printf("\thost object - %p\n", hostScene[0]);
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < count; i++)
{
deviceScene[i] = hostScene[i];
printf("%p\n", &deviceScene[i]);
}
}
__syncthreads();
}
__device__ float getAlpha(LPDWORD pixels, unsigned int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
// bitwise alpha mask (a logical && would collapse this to 0 or 1)
return pixels[index] & 0xFF000000;
}
template<typename _Ty>
void mallocDevice(void** dst, unsigned int count)
{
hipError_t error = hipMalloc(dst, sizeof(_Ty) * count);
if (error != hipError_t::hipSuccess)
{
printf("\tcritical error occured, result must be hipSuccess.\n");
printf("%s\n", hipGetErrorString(error));
throw std::runtime_error("");
}
}
template<typename _Ty>
void copyHostToDevice(_Ty* device, _Ty* host, unsigned int count)
{
hipError_t error = hipMemcpy(device, host, sizeof(_Ty) * count, hipMemcpyHostToDevice);
if (error != hipError_t::hipSuccess)
{
printf("\tcritical error occured, result must be hipSuccess.\n");
printf("\t%s\n", hipGetErrorString(error));
terminate();
throw std::runtime_error("");
}
}
__device__ void setColor(LPDWORD pixels, unsigned int width, unsigned int height, Color color, float alpha, int sampleCount)
{
int writeColor = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
/*auto scale = 1.0 / sampleCount;
auto r = color.e[0] * scale;
auto g = color.e[1] * scale;
auto b = color.e[2] * scale;
*/
auto scale = 1.0 / sampleCount;
auto a = Clamp(alpha, 0, 0.999);
auto r = Clamp(color.e[0] * scale, 0, 0.999);
auto g = Clamp(color.e[1] * scale, 0, 0.999);
auto b = Clamp(color.e[2] * scale, 0, 0.999);
int ia = static_cast<int>(__fmul_rd(255.999, a));
int ir = static_cast<int>(__fmul_rd(255.999, r));
int ig = static_cast<int>(__fmul_rd(255.999, g));
int ib = static_cast<int>(__fmul_rd(255.999, b));
//
// alpha occupies bits 24-31 of the packed 0xAARRGGBB pixel (a 32-bit shift on an int is undefined)
writeColor |= (ia << 24);
writeColor |= (ir << 16);
writeColor |= (ig << 8);
writeColor |= ib;
auto index = y * width + x;
pixels[index] = writeColor;
__syncthreads();
return;
}
__global__ void clearPixels(LPDWORD pixels, unsigned int width, unsigned int height, int sampleCount)
{
const auto aspectRatio = 4.0 / 3.0;
const int imageWidth = width;
const int imageHeight = height;
auto origin = Point3(0, 0, 0);
auto horizontal = Vec3(aspectRatio * 2.0, 0, 0);
auto vertical = Vec3(0, 2.0, 0);
auto lowerLeft = origin - horizontal / 2 - vertical / 2 - Vec3(0, 0, 1.0);
int x = blockIdx.x * blockDim.x + threadIdx.x * blockIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y * blockIdx.z;
auto u = float(x) / (width - 1);
auto v = float(y) / (height - 1);
Ray r(origin, lowerLeft + u * horizontal + v * vertical - origin);
Color outColor;
Vec3 unitDirection = UnitVector(r.mDirection);
auto t = 0.5 * (unitDirection.e[1] + 1.0);
outColor = (1.0 - t) * Color(1.0, 1.0, 1.0) + t * Color(0.5, 0.7, 1.0);
setColor(pixels, width, height, outColor, 0, sampleCount);
}
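// Traces one primary ray: bounces it diffusely (up to `depth` times, attenuating by 0.5
// per hit) against a sphere from deviceScene and returns the attenuated sky-gradient color.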
__device__ Color RayColor(LPDWORD pixels, Ray& r, unsigned int count, unsigned int width, unsigned int height, Sphere* deviceScene, int depth, int tid, hiprandState_t* randState)
{
Sphere sphere = deviceScene[blockIdx.z];
Color outColor{};
for (unsigned int j = 0; j < blockDim.z; j++)
{
Ray curRay = r;
float atten = 1.0f;
sphere = deviceScene[j];
for (unsigned int i = 0; i < depth; i++)
{
HitRecord rec{};
if (sphere.Hit(curRay, 0.001f, INF, rec))
{
Point3 target = rec.p + rec.normal + RandomUnitSphere(randState, tid);
atten *= 0.5;
//outColor *= atten;
curRay = Ray(rec.p, target - rec.p);
//return Color(1, 1, 1);
}
}
Vec3 unitDirection = UnitVector(curRay.mDirection);
auto t = 0.5 * (unitDirection.e[1] + 1.0);
outColor = (1.0 - t) * Color(1.0, 1.0, 1.0) + t * Color(0.5, 0.7, 1.0);
return atten * outColor;
}
return Color(0,0,0);
}
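// One thread per pixel: accumulates sampleCount path-traced samples via RayColor and hands
// the sum to setColor, which averages, packs and stores the final pixel.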
__global__ void CudaRender(LPDWORD pixels, unsigned int width, unsigned int height, unsigned int count, Sphere* deviceScene, int sampleCount,hiprandState_t* randState)
{
const auto aspectRatio = 4.0 / 3.0;
const int imageWidth = width;
const int imageHeight = height;
const int depth = 50;
auto origin = Point3(0, 0, 0);
auto horizontal = Vec3(aspectRatio * 2.0, 0, 0);
auto vertical = Vec3(0, 2.0, 0);
auto lowerLeft = origin - horizontal / 2 - vertical / 2 - Vec3(0, 0, 1.0);
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int tid = y * width + x;
Color outColor{};
for (unsigned int i = 0; i < sampleCount; i++)
{
auto u = float(x) / (width - 1);
auto v = float(y) / (height - 1);
Ray r(origin, lowerLeft + u * horizontal + v * vertical - origin);
outColor += RayColor(pixels, r, count, width, height, deviceScene, depth, tid, randState);
// setColor(pixels, width, height, outColor, 1, 1);
}
setColor(pixels, width, height, outColor, 1, sampleCount);
__syncthreads();
}
__global__ void ClearGradiant(LPDWORD pixels, unsigned int width, unsigned int height, Color color)
{
int writeColor = 0;
//int x = blockIdx.x * blockDim.x + threadIdx.x;
//int y = blockIdx.y * blockDim.y + threadIdx.y;
//auto r = __fdiv_rn(threadIdx.x, (width - 1));
//auto g = __fdiv_ru(blockIdx.x, (height - 1));
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto r = __fdiv_rn(x, (width - 1));
auto g = __fdiv_ru(y, (height - 1));
auto b = color.e[2];
int ir = static_cast<int>(__fmul_rd(255.999, r));
int ig = static_cast<int>(__fmul_rd(255.999, g));
int ib = static_cast<int>(__fmul_rd(255.999, b));
writeColor |= (ir << 16);
writeColor |= (ig << 8);
writeColor |= ib;
auto index = y * width + x;
pixels[index] = writeColor;
__syncthreads();
return;
}
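// Seeds one hiprand state per thread, keyed by the thread's global pixel index.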
__global__ void cudaInitRand(hiprandState_t* deviceRandStates, int count, unsigned int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int tid = y * width + x;
hiprand_init(tid, 0, 0, &deviceRandStates[tid]); // give each thread its own state
__syncthreads();
return;
}
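// Creates the Win32 DIB section used as the CPU-side framebuffer, allocates the GPU
// framebuffer, scene, camera and rand-state buffers, and uploads the host scene.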
Raytracer::Raytracer(HWND handle, HINSTANCE instance, unsigned int width, unsigned int height)
: mHandle(handle), mInst(instance), mWidth(width), mHeight(height)
{
hipDeviceProp_t prop;
hipError_t error = hipGetDeviceProperties(&prop, 0);
std::cout << hipGetErrorString(error) << std::endl;
BITMAPINFO bitInfo{};
bitInfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bitInfo.bmiHeader.biWidth = width;
bitInfo.bmiHeader.biHeight = height;
bitInfo.bmiHeader.biBitCount = 32;
bitInfo.bmiHeader.biPlanes = 1;
bitInfo.bmiHeader.biCompression = BI_RGB;
HDC dc = GetDC(mHandle);
mBitmap = CreateDIBSection(dc, &bitInfo, DIB_RGB_COLORS, (void**)(&mPixels), nullptr, 0);
mMemoryDC = CreateCompatibleDC(dc);
SelectObject(mMemoryDC, mBitmap);
ReleaseDC(mHandle, dc);
// CUDA CODE ------------------------------------------------------
error = hipMalloc((void**)(&gPixels), 4 * 800 * 600);
std::cout << hipGetErrorString(error) << '\n';
hostScene.push_back(Sphere(Point3(0, 0, -1), 0.5));
hostScene.push_back(Sphere(Point3(0, -100.5, -1), 100));
printf("Start malloc device memory.\n");
mallocDevice<Sphere>((void**)&deviceScene, 2);
mallocDevice<Camera>((void**)&deviceCamera, 1);
dim3 blocks = dim3(16, 12, 2);
dim3 grids = dim3(width / blocks.x, height / blocks.y, 1);
int threadCount = grids.x * grids.y * blocks.x * blocks.y * blocks.z;
printf("%d\n", threadCount);
mallocDevice<hiprandState_t>((void**)&deviceRandState, threadCount);
hipDeviceSynchronize();
hipLaunchKernelGGL(cudaInitRand, dim3(grids), dim3(blocks), 0, 0, deviceRandState, threadCount, width); // (states, count, width)
hipDeviceSynchronize();
printf("\t - Success.\n");
printf("Start copying host memory to device.\n");
copyHostToDevice<Sphere>(deviceScene, &hostScene[0], 2);
copyHostToDevice<Camera>(deviceCamera, &hostCamera, 1);
printf("\t - Success.\n");
//ClearGradiant << <600, 800>> > (gPixels, mWidth, mHeight, Color(1, 1, 0.25));
}
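// Renders one frame: launches CudaRender over the 800x600 grid and copies the GPU
// framebuffer back into the DIB section (mPixels) for presentation.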
void Raytracer::Run()
{
dim3 blocks = dim3(16, 12, hostScene.size());
dim3 grids = dim3(800 / blocks.x, 600 / blocks.y, 1);
hipError_t error;
//ClearGradiant << <grids, blocks>> > (gPixels, mWidth, mHeight, Color(1, 1, 0.25));
//clearPixels << <grids, blocks >> > (gPixels, mWidth, mHeight, sampleCount);
hipDeviceSynchronize();
CudaRender << <grids, blocks>> > (gPixels, mWidth, mHeight, hostScene.size(), deviceScene, sampleCount, deviceRandState);
error = hipGetLastError();
hipDeviceSynchronize();
if (error != hipError_t::hipSuccess)
{
std::cerr << "\tcritical error occured, result must be hipSuccess.\n";
std::cerr << hipGetErrorName(error) << '\n' << hipGetErrorString(error) << std::endl;
throw std::runtime_error("");
}
error = hipMemcpy(mPixels, gPixels, sizeof(DWORD) * 800 * 600, hipMemcpyDeviceToHost);
if (error != hipError_t::hipSuccess)
{
std::cerr << "\tcritical error occured, result must be hipSuccess.\n";
std::cerr << hipGetErrorString(error) << std::endl;
throw std::runtime_error("");
}
hipDeviceSynchronize();
}
void Raytracer::Release()
{
DeleteDC(mMemoryDC);
DeleteObject(mBitmap);
}
| df65fa01bd31dc336f23c0bced984c6ea2701cec.cu | #include "Raytracer.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
__device__ LPDWORD gPixels;
__device__ Sphere* deviceScene;
__device__ Camera* deviceCamera;
__device__ curandState* deviceRandState;
const int sampleCount = 1;
std::vector<Sphere> hostScene;
Camera hostCamera;
__global__ void cudaCopyPixels(LPDWORD cpuPixels, LPDWORD gpuPixels, unsigned int size)
{
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < size; i++)
{
cpuPixels[i] = gpuPixels[i];
}
}
__syncthreads();
}
__global__ void cudaInitDeviceMemory()
{
printf("cudaInitDeviceMemory\n");
printf("\t - Malloc device scene memory.\n");
deviceScene = (Sphere*)(malloc(sizeof(Sphere) * 2));
printf("\t\t Malloc result : %p\n", &deviceScene[0]);
printf("\t\t Malloc result : %p\n", &deviceScene[1]);
printf("\t\t %d thread acquire %p \n", threadIdx.x, &deviceScene[0]);
printf("\t\t %d thread acquire %p \n", threadIdx.x, &deviceScene[1]);
}
__global__ void cudaCopyScene(Sphere* hostScene, unsigned int count)
{
printf("copy scene (gpu)\n");
printf("\tdevice object - %p\n", &deviceScene[0]);
printf("\thost object - %p\n", hostScene[0]);
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < count; i++)
{
deviceScene[i] = hostScene[i];
printf("%p\n", &deviceScene[i]);
}
}
__syncthreads();
}
__device__ float getAlpha(LPDWORD pixels, unsigned int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
// alpha mask: keep only the top (alpha) byte of the ARGB pixel
return pixels[index] & 0xFF000000;
}
template<typename _Ty>
void mallocDevice(void** dst, unsigned int count)
{
cudaError error = cudaMalloc(dst, sizeof(_Ty) * count);
if (error != cudaError::cudaSuccess)
{
printf("\tcritical error occured, result must be cudaSuccess.\n");
printf("%s\n", cudaGetErrorString(error));
throw std::runtime_error("");
}
}
template<typename _Ty>
void copyHostToDevice(_Ty* device, _Ty* host, unsigned int count)
{
cudaError error = cudaMemcpy(device, host, sizeof(_Ty) * count, cudaMemcpyHostToDevice);
if (error != cudaError::cudaSuccess)
{
printf("\tcritical error occured, result must be cudaSuccess.\n");
printf("\t%s\n", cudaGetErrorString(error));
terminate();
throw std::runtime_error("");
}
}
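// Averages the accumulated color over sampleCount samples, clamps it, packs it into a
// 32-bit ARGB value and writes it at this thread's (x, y) position in the framebuffer.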
__device__ void setColor(LPDWORD pixels, unsigned int width, unsigned int height, Color color, float alpha, int sampleCount)
{
int writeColor = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
/*auto scale = 1.0 / sampleCount;
auto r = color.e[0] * scale;
auto g = color.e[1] * scale;
auto b = color.e[2] * scale;
*/
auto scale = 1.0 / sampleCount;
auto a = Clamp(alpha, 0, 0.999);
auto r = Clamp(color.e[0] * scale, 0, 0.999);
auto g = Clamp(color.e[1] * scale, 0, 0.999);
auto b = Clamp(color.e[2] * scale, 0, 0.999);
int ia = static_cast<int>(__fmul_rd(255.999, a));
int ir = static_cast<int>(__fmul_rd(255.999, r));
int ig = static_cast<int>(__fmul_rd(255.999, g));
int ib = static_cast<int>(__fmul_rd(255.999, b));
// pack each channel into the 32-bit ARGB pixel via bit shifts (alpha in the top byte)
writeColor |= (ia << 24);
writeColor |= (ir << 16);
writeColor |= (ig << 8);
writeColor |= ib;
auto index = y * width + x;
pixels[index] = writeColor;
__syncthreads();
return;
}
__global__ void clearPixels(LPDWORD pixels, unsigned int width, unsigned int height, int sampleCount)
{
const auto aspectRatio = 4.0 / 3.0;
const int imageWidth = width;
const int imageHeight = height;
auto origin = Point3(0, 0, 0);
auto horizontal = Vec3(aspectRatio * 2.0, 0, 0);
auto vertical = Vec3(0, 2.0, 0);
auto lowerLeft = origin - horizontal / 2 - vertical / 2 - Vec3(0, 0, 1.0);
int x = blockIdx.x * blockDim.x + threadIdx.x * blockIdx.z;
int y = blockIdx.y * blockDim.y + threadIdx.y * blockIdx.z;
auto u = float(x) / (width - 1);
auto v = float(y) / (height - 1);
Ray r(origin, lowerLeft + u * horizontal + v * vertical - origin);
Color outColor;
Vec3 unitDirection = UnitVector(r.mDirection);
auto t = 0.5 * (unitDirection.e[1] + 1.0);
outColor = (1.0 - t) * Color(1.0, 1.0, 1.0) + t * Color(0.5, 0.7, 1.0);
setColor(pixels, width, height, outColor, 0, sampleCount);
}
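// Traces one primary ray: bounces it diffusely (up to `depth` times, attenuating by 0.5
// per hit) against a sphere from deviceScene and returns the attenuated sky-gradient color.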
__device__ Color RayColor(LPDWORD pixels, Ray& r, unsigned int count, unsigned int width, unsigned int height, Sphere* deviceScene, int depth, int tid, curandState* randState)
{
Sphere sphere = deviceScene[blockIdx.z];
Color outColor{};
for (unsigned int j = 0; j < blockDim.z; j++)
{
Ray curRay = r;
float atten = 1.0f;
sphere = deviceScene[j];
for (unsigned int i = 0; i < depth; i++)
{
HitRecord rec{};
if (sphere.Hit(curRay, 0.001f, INF, rec))
{
Point3 target = rec.p + rec.normal + RandomUnitSphere(randState, tid);
atten *= 0.5;
//outColor *= atten;
curRay = Ray(rec.p, target - rec.p);
//return Color(1, 1, 1);
}
}
Vec3 unitDirection = UnitVector(curRay.mDirection);
auto t = 0.5 * (unitDirection.e[1] + 1.0);
outColor = (1.0 - t) * Color(1.0, 1.0, 1.0) + t * Color(0.5, 0.7, 1.0);
return atten * outColor;
}
return Color(0,0,0);
}
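// One thread per pixel: accumulates sampleCount path-traced samples via RayColor and hands
// the sum to setColor, which averages, packs and stores the final pixel.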
__global__ void CudaRender(LPDWORD pixels, unsigned int width, unsigned int height, unsigned int count, Sphere* deviceScene, int sampleCount,curandState* randState)
{
const auto aspectRatio = 4.0 / 3.0;
const int imageWidth = width;
const int imageHeight = height;
const int depth = 50;
auto origin = Point3(0, 0, 0);
auto horizontal = Vec3(aspectRatio * 2.0, 0, 0);
auto vertical = Vec3(0, 2.0, 0);
auto lowerLeft = origin - horizontal / 2 - vertical / 2 - Vec3(0, 0, 1.0);
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int tid = y * width + x;
Color outColor{};
for (unsigned int i = 0; i < sampleCount; i++)
{
auto u = float(x) / (width - 1);
auto v = float(y) / (height - 1);
Ray r(origin, lowerLeft + u * horizontal + v * vertical - origin);
outColor += RayColor(pixels, r, count, width, height, deviceScene, depth, tid, randState);
// setColor(pixels, width, height, outColor, 1, 1);
}
setColor(pixels, width, height, outColor, 1, sampleCount);
__syncthreads();
}
__global__ void ClearGradiant(LPDWORD pixels, unsigned int width, unsigned int height, Color color)
{
int writeColor = 0;
//int x = blockIdx.x * blockDim.x + threadIdx.x;
//int y = blockIdx.y * blockDim.y + threadIdx.y;
//auto r = __fdiv_rn(threadIdx.x, (width - 1));
//auto g = __fdiv_ru(blockIdx.x, (height - 1));
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto r = __fdiv_rn(x, (width - 1));
auto g = __fdiv_ru(y, (height - 1));
auto b = color.e[2];
int ir = static_cast<int>(__fmul_rd(255.999, r));
int ig = static_cast<int>(__fmul_rd(255.999, g));
int ib = static_cast<int>(__fmul_rd(255.999, b));
writeColor |= (ir << 16);
writeColor |= (ig << 8);
writeColor |= ib;
auto index = y * width + x;
pixels[index] = writeColor;
__syncthreads();
return;
}
__global__ void cudaInitRand(curandState* deviceRandStates, int count, unsigned int width)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int tid = y * width + x;
curand_init(tid, 0, 0, &deviceRandStates[tid]); // give each thread its own state
__syncthreads();
return;
}
Raytracer::Raytracer(HWND handle, HINSTANCE instance, unsigned int width, unsigned int height)
: mHandle(handle), mInst(instance), mWidth(width), mHeight(height)
{
cudaDeviceProp prop;
cudaError_t error = cudaGetDeviceProperties(&prop, 0);
std::cout << cudaGetErrorString(error) << std::endl;
BITMAPINFO bitInfo{};
bitInfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bitInfo.bmiHeader.biWidth = width;
bitInfo.bmiHeader.biHeight = height;
bitInfo.bmiHeader.biBitCount = 32;
bitInfo.bmiHeader.biPlanes = 1;
bitInfo.bmiHeader.biCompression = BI_RGB;
HDC dc = GetDC(mHandle);
mBitmap = CreateDIBSection(dc, &bitInfo, DIB_RGB_COLORS, (void**)(&mPixels), nullptr, 0);
mMemoryDC = CreateCompatibleDC(dc);
SelectObject(mMemoryDC, mBitmap);
ReleaseDC(mHandle, dc);
// CUDA CODE ------------------------------------------------------
error = cudaMalloc((void**)(&gPixels), 4 * 800 * 600);
std::cout << cudaGetErrorString(error) << '\n';
hostScene.push_back(Sphere(Point3(0, 0, -1), 0.5));
hostScene.push_back(Sphere(Point3(0, -100.5, -1), 100));
printf("Start malloc device memory.\n");
mallocDevice<Sphere>((void**)&deviceScene, 2);
mallocDevice<Camera>((void**)&deviceCamera, 1);
dim3 blocks = dim3(16, 12, 2);
dim3 grids = dim3(width / blocks.x, height / blocks.y, 1);
int threadCount = grids.x * grids.y * blocks.x * blocks.y * blocks.z;
printf("%d\n", threadCount);
mallocDevice<curandState>((void**)&deviceRandState, threadCount);
cudaDeviceSynchronize();
cudaInitRand<<<grids,blocks>>>(deviceRandState, width, threadCount);
cudaDeviceSynchronize();
printf("\t - Success.\n");
printf("Start copying host memory to device.\n");
copyHostToDevice<Sphere>(deviceScene, &hostScene[0], 2);
copyHostToDevice<Camera>(deviceCamera, &hostCamera, 1);
printf("\t - Success.\n");
//ClearGradiant << <600, 800>> > (gPixels, mWidth, mHeight, Color(1, 1, 0.25));
}
void Raytracer::Run()
{
dim3 blocks = dim3(16, 12, hostScene.size());
dim3 grids = dim3(800 / blocks.x, 600 / blocks.y, 1);
cudaError error;
//ClearGradiant << <grids, blocks>> > (gPixels, mWidth, mHeight, Color(1, 1, 0.25));
//clearPixels << <grids, blocks >> > (gPixels, mWidth, mHeight, sampleCount);
cudaDeviceSynchronize();
CudaRender << <grids, blocks>> > (gPixels, mWidth, mHeight, hostScene.size(), deviceScene, sampleCount, deviceRandState);
error = cudaGetLastError();
cudaDeviceSynchronize();
if (error != cudaError::cudaSuccess)
{
std::cerr << "\tcritical error occured, result must be cudaSuccess.\n";
std::cerr << cudaGetErrorName(error) << '\n' << cudaGetErrorString(error) << std::endl;
throw std::runtime_error("");
}
error = cudaMemcpy(mPixels, gPixels, sizeof(DWORD) * 800 * 600, cudaMemcpyDeviceToHost);
if (error != cudaError::cudaSuccess)
{
std::cerr << "\tcritical error occured, result must be cudaSuccess.\n";
std::cerr << cudaGetErrorString(error) << std::endl;
throw std::runtime_error("");
}
cudaDeviceSynchronize();
}
void Raytracer::Release()
{
DeleteDC(mMemoryDC);
DeleteObject(mBitmap);
}
|
852136fdac48f959abbc2df99ad66306e73f70db.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <algorithm>
#include "../src/cufinufft.h"
#include "../src/spreadinterp.h"
#include "../finufft/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2;
FLT upsampfac=2.0;
int N1, N2, M;
if (argc<5) {
fprintf(stderr,"Usage: spread2d [method [maxsubprob [nupts_distr [N1 N2 [rep [tol [kerevalmeth]]]]]]]\n");
fprintf(stderr,"Details --\n");
fprintf(stderr,"method 1: nupts driven\n");
fprintf(stderr,"method 2: sub-problem\n");
fprintf(stderr,"method 3: sub-problem with paul's idea\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
int maxsubprobsize;
sscanf(argv[2],"%d",&maxsubprobsize);
int nupts_distribute;
sscanf(argv[3],"%d",&nupts_distribute);
sscanf(argv[4],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/upsampfac;
N2 = (int) nf2/upsampfac;
int rep = 10;
if(argc>6){
//sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
sscanf(argv[6],"%d",&rep);
//if(M == 0) M=N1*N2*4*rep;
}
M = N1*N2*4*rep;// let density always be 1
M = nf1*nf2*rep;// let density always be 1
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int kerevalmeth=0;
if(argc>8){
sscanf(argv[8],"%d",&kerevalmeth);
}
int ier;
int dim=2;
int ns=::ceil(-log10(tol/10.0));
cufinufft_plan dplan;
ier = cufinufft_default_opts(type1, dim, dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.gpu_method=method;
dplan.opts.upsampfac=upsampfac;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
dplan.opts.gpu_kerevalmeth=kerevalmeth;
dplan.spopts.pirange=0;
cout<<scientific<<setprecision(3);
FLT *x, *y;
CPX *c, *fw;
hipHostMalloc(&x, M*sizeof(FLT));
hipHostMalloc(&y, M*sizeof(FLT));
hipHostMalloc(&c, M*sizeof(CPX));
hipHostMalloc(&fw,nf1*nf2*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
for (int k=0; k<rep; k++){
if(k+i*rep+j*nf1*rep < M){
x[k+i*rep+j*nf1*rep] = i;
y[k+i*rep+j*nf1*rep] = j;
}
}
}
}
#if 0
srand(unsigned(1));
random_shuffle (&x[0], &x[M-1]);
srand(unsigned(1));
random_shuffle (&y[0], &y[M-1]);
#endif
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 2:
{
for (int k=0; k<rep; k++){
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
if(i+j*nf1+k*nf1*nf2< M){
x[i+j*nf1+k*nf1*nf2] = i;
y[i+j*nf1+k*nf1*nf2] = j;
}
}
}
}
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 3:
{
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
for (int k=0; k<rep; k++){
if(k+i*rep+j*nf1*rep < M){
x[k+i*rep+j*nf1*rep] = i;
y[k+i*rep+j*nf1*rep] = j;
}
}
}
}
srand(unsigned(1));
random_shuffle (&x[0], &x[M-1]);
srand(unsigned(1));
random_shuffle (&y[0], &y[M-1]);
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 4: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01(), nf1, 1)/2.0 - 0.5;// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01(), nf2, 1)/2.0 - 0.5;
if(method == 6){
x[i] = x[i] > nf1-0.5 ? x[i] - nf1 : x[i];
y[i] = y[i] > nf2-0.5 ? y[i] - nf2 : y[i];// x in [-pi,pi)
}
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 5:
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
if(method == 6){
x[i] = x[i] > nf1-0.5 ? x[i] - nf1 : x[i];
y[i] = y[i] > nf2-0.5 ? y[i] - nf2 : y[i];// x in [-pi,pi)
}
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 6:
{
for(int i=0; i<M; i++) {
x[i] = 1;// x in [-pi,pi)
y[i] = 1;
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(hipMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec()
<<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Spreading "<<M<<" pts to ["<<nf1<<"x"<<nf2<<"] uniform grids"
<<endl;
#endif
timer.restart();
ier = cufinufft_spread2d(N1, N2, nf1, nf2, fw, M, x, y, c, &dplan);
if(ier != 0 ){
cout<<"error: cnufftspread2d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld NU pts to #%d U pts in %.3g s (\t%.3g NU pts/s)\n",
dplan.opts.gpu_method,M,nf1*nf2,t,M/t);
#if 0
cout<<"[result-input]"<<endl;
for(int j=0; j<nf2; j++){
if( j % dplan.opts.gpu_binsizey == 0)
printf("\n");
for (int i=0; i<nf1; i++){
if( i % dplan.opts.gpu_binsizex == 0 && i!=0)
printf(" |");
printf(" (%2.3g,%2.3g)",fw[i+j*nf1].real(),fw[i+j*nf1].imag() );
}
cout<<endl;
}
cout<<endl;
#endif
hipHostFree(x);
hipHostFree(y);
hipHostFree(c);
hipHostFree(fw);
return 0;
}
| 852136fdac48f959abbc2df99ad66306e73f70db.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <algorithm>
#include "../src/cufinufft.h"
#include "../src/spreadinterp.h"
#include "../finufft/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2;
FLT upsampfac=2.0;
int N1, N2, M;
if (argc<5) {
fprintf(stderr,"Usage: spread2d [method [maxsubprob [nupts_distr [N1 N2 [rep [tol [kerevalmeth]]]]]]]\n");
fprintf(stderr,"Details --\n");
fprintf(stderr,"method 1: nupts driven\n");
fprintf(stderr,"method 2: sub-problem\n");
fprintf(stderr,"method 3: sub-problem with paul's idea\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
int maxsubprobsize;
sscanf(argv[2],"%d",&maxsubprobsize);
int nupts_distribute;
sscanf(argv[3],"%d",&nupts_distribute);
sscanf(argv[4],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/upsampfac;
N2 = (int) nf2/upsampfac;
int rep = 10;
if(argc>6){
//sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
sscanf(argv[6],"%d",&rep);
//if(M == 0) M=N1*N2*4*rep;
}
M = N1*N2*4*rep;// let density always be 1
M = nf1*nf2*rep;// let density always be 1
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int kerevalmeth=0;
if(argc>8){
sscanf(argv[8],"%d",&kerevalmeth);
}
int ier;
int dim=2;
int ns=std::ceil(-log10(tol/10.0));
cufinufft_plan dplan;
ier = cufinufft_default_opts(type1, dim, dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.gpu_method=method;
dplan.opts.upsampfac=upsampfac;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
dplan.opts.gpu_kerevalmeth=kerevalmeth;
dplan.spopts.pirange=0;
cout<<scientific<<setprecision(3);
FLT *x, *y;
CPX *c, *fw;
cudaMallocHost(&x, M*sizeof(FLT));
cudaMallocHost(&y, M*sizeof(FLT));
cudaMallocHost(&c, M*sizeof(CPX));
cudaMallocHost(&fw,nf1*nf2*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
for (int k=0; k<rep; k++){
if(k+i*rep+j*nf1*rep < M){
x[k+i*rep+j*nf1*rep] = i;
y[k+i*rep+j*nf1*rep] = j;
}
}
}
}
#if 0
srand(unsigned(1));
random_shuffle (&x[0], &x[M-1]);
srand(unsigned(1));
random_shuffle (&y[0], &y[M-1]);
#endif
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 2:
{
for (int k=0; k<rep; k++){
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
if(i+j*nf1+k*nf1*nf2< M){
x[i+j*nf1+k*nf1*nf2] = i;
y[i+j*nf1+k*nf1*nf2] = j;
}
}
}
}
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 3:
{
for (int j=0; j<nf2; j++) {
for (int i=0; i<nf1; i++){
for (int k=0; k<rep; k++){
if(k+i*rep+j*nf1*rep < M){
x[k+i*rep+j*nf1*rep] = i;
y[k+i*rep+j*nf1*rep] = j;
}
}
}
}
srand(unsigned(1));
random_shuffle (&x[0], &x[M-1]);
srand(unsigned(1));
random_shuffle (&y[0], &y[M-1]);
for (int i = 0; i < M; i++) {
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 4: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01(), nf1, 1)/2.0 - 0.5;// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01(), nf2, 1)/2.0 - 0.5;
if(method == 6){
x[i] = x[i] > nf1-0.5 ? x[i] - nf1 : x[i];
y[i] = y[i] > nf2-0.5 ? y[i] - nf2 : y[i];// x in [-pi,pi)
}
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 5:
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
if(method == 6){
x[i] = x[i] > nf1-0.5 ? x[i] - nf1 : x[i];
y[i] = y[i] > nf2-0.5 ? y[i] - nf2 : y[i];// x in [-pi,pi)
}
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
case 6:
{
for(int i=0; i<M; i++) {
x[i] = 1;// x in [-pi,pi)
y[i] = 1;
c[i].real(randm11());
c[i].imag(randm11());
}
}
break;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(cudaMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec()
<<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Spreading "<<M<<" pts to ["<<nf1<<"x"<<nf2<<"] uniform grids"
<<endl;
#endif
timer.restart();
ier = cufinufft_spread2d(N1, N2, nf1, nf2, fw, M, x, y, c, &dplan);
if(ier != 0 ){
cout<<"error: cnufftspread2d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld NU pts to #%d U pts in %.3g s (\t%.3g NU pts/s)\n",
dplan.opts.gpu_method,M,nf1*nf2,t,M/t);
#if 0
cout<<"[result-input]"<<endl;
for(int j=0; j<nf2; j++){
if( j % dplan.opts.gpu_binsizey == 0)
printf("\n");
for (int i=0; i<nf1; i++){
if( i % dplan.opts.gpu_binsizex == 0 && i!=0)
printf(" |");
printf(" (%2.3g,%2.3g)",fw[i+j*nf1].real(),fw[i+j*nf1].imag() );
}
cout<<endl;
}
cout<<endl;
#endif
cudaFreeHost(x);
cudaFreeHost(y);
cudaFreeHost(c);
cudaFreeHost(fw);
return 0;
}
|
b24c1b1d590b4ac4cabfa9ca8c9605bbea1d30f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> c, Sun Nov 20 20:20:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
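// Launch-shape note (an assumption inferred from the tile sizes above; the host-side
// wrapper is not part of this file): the kernel is expected to be launched with roughly
// dim3 grid( magma_ceildiv(n, NB_X) ), dim3 threads( NB_X, NB_Y ),
// i.e. one 64x4 thread block per 64-row block row of A.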
__global__ void
chemv_kernel_U(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_C_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_C_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
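// (Assumed launch shape, matching the description above: magma_ceildiv(n, NB_X) blocks of
// NB_X threads -- one block per block row, one thread per row of y.)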
__global__ void
chemv_kernel_U_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
| b24c1b1d590b4ac4cabfa9ca8c9605bbea1d30f9.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> c, Sun Nov 20 20:20:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
__global__ void
chemv_kernel_U(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_C_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_C_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
chemv_kernel_U_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
|
1c5f81e1576d267aa6b41f421b4a552d67466131.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sgemvn_kernel1_fermi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
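// Auto-generated micro-benchmark: for each matrix size and block shape above, the kernel
// is warmed up, launched 1000 times, and the total time in microseconds is printed as
// [time,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].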
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int m = 2;
int n1 = 1;
float alpha = 2;
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats
int lda = 1;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sgemvn_kernel1_fermi, dim3(gridBlock), dim3(threadBlock), 0, 0, n, m, n1, alpha, A, lda, x, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sgemvn_kernel1_fermi, dim3(gridBlock), dim3(threadBlock), 0, 0, n, m, n1, alpha, A, lda, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sgemvn_kernel1_fermi, dim3(gridBlock), dim3(threadBlock), 0, 0, n, m, n1, alpha, A, lda, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1c5f81e1576d267aa6b41f421b4a552d67466131.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sgemvn_kernel1_fermi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
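// Auto-generated micro-benchmark: for each matrix size and block shape above, the kernel
// is warmed up, launched 1000 times, and the total time in microseconds is printed as
// [time,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].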
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int m = 2;
int n1 = 1;
float alpha = 2;
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(float)); // XSIZE*YSIZE floats
int lda = 1;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sgemvn_kernel1_fermi<<<gridBlock,threadBlock>>>(n,m,n1,alpha,A,lda,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sgemvn_kernel1_fermi<<<gridBlock,threadBlock>>>(n,m,n1,alpha,A,lda,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sgemvn_kernel1_fermi<<<gridBlock,threadBlock>>>(n,m,n1,alpha,A,lda,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
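// Illustrative note (not part of the original harness): the printed value covers all
// 1000 timed launches, so a per-launch average in microseconds would simply be
// usecs.count() / 1000.0f if that is the figure of interest.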
}
}} |
886540a4fe6c4042540760c459bc98d1401363a0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <time.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include "./common/helper_functions.h"
#include "./common/helper_cuda.h"
#include "./common/timer.h"
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 1048576 * 32;
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiGPU\n");
checkCudaErrors(hipGetDeviceCount(&GPU_N));
if (GPU_N > MAX_GPU_COUNT)
{
GPU_N = MAX_GPU_COUNT;
}
printf("CUDA-capable device count: %i\n", GPU_N);
printf("Generating input data...\n\n");
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
//Assign data ranges to GPUs
gpuBase = 0;
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
gpuBase += plan[i].dataN;
}
//Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipStreamCreate(&plan[i].stream));
//Allocate memory
checkCudaErrors(hipMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float)));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
StartTimer();
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
checkCudaErrors(hipSetDevice(i));
//Copy input data from CPU
checkCudaErrors(hipMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float), hipMemcpyHostToDevice, plan[i].stream));
//Perform GPU computations
hipLaunchKernelGGL(( reduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream, plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
getLastCudaError("reduceKernel() execution failed.\n");
//Read back GPU results
checkCudaErrors(hipMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float), hipMemcpyDeviceToHost, plan[i].stream));
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
float sum;
//Set device
checkCudaErrors(hipSetDevice(i));
//Wait for all operations to finish
hipStreamSynchronize(plan[i].stream);
//Finalize GPU reduction for current subvector
sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = (float)sum;
//Shut down this GPU
checkCudaErrors(hipHostFree(plan[i].h_Sum_from_device));
checkCudaErrors(hipFree(plan[i].d_Sum));
checkCudaErrors(hipFree(plan[i].d_Data));
checkCudaErrors(hipStreamDestroy(plan[i].stream));
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
printf(" GPU Processing time: %f (ms)\n\n", GetTimer());
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
struct timespec cpu_start, cpu_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
printf( "cpu execution time: %3.1f ms\n", result);
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipHostFree(plan[i].h_Data));
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 886540a4fe6c4042540760c459bc98d1401363a0.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <time.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include "./common/helper_functions.h"
#include "./common/helper_cuda.h"
#include "./common/timer.h"
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 1048576 * 32;
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
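// Illustrative note (assumption, not part of the original sample): launched below as
// reduceKernel<<<BLOCK_N, THREAD_N>>> with BLOCK_N = 32 and THREAD_N = 256, threadN is
// 8192, so the grid-stride loop makes thread tid accumulate elements
// tid, tid + 8192, tid + 2 * 8192, ... and the host later adds the ACCUM_N = 8192
// partial sums written to d_Result.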
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiGPU\n");
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
if (GPU_N > MAX_GPU_COUNT)
{
GPU_N = MAX_GPU_COUNT;
}
printf("CUDA-capable device count: %i\n", GPU_N);
printf("Generating input data...\n\n");
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
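// Worked example (illustrative, not from the original sample): with DATA_N = 10 and
// GPU_N = 3 the division above gives dataN = {3, 3, 3}; the remainder loop then bumps
// plan[0] to 4, so the per-GPU chunks {4, 3, 3} sum back to DATA_N.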
//Assign data ranges to GPUs
gpuBase = 0;
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
gpuBase += plan[i].dataN;
}
//Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaStreamCreate(&plan[i].stream));
//Allocate memory
checkCudaErrors(cudaMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float)));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
StartTimer();
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
checkCudaErrors(cudaSetDevice(i));
//Copy input data from CPU
checkCudaErrors(cudaMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float), cudaMemcpyHostToDevice, plan[i].stream));
//Perform GPU computations
reduceKernel<<<BLOCK_N, THREAD_N, 0, plan[i].stream>>>(plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
getLastCudaError("reduceKernel() execution failed.\n");
//Read back GPU results
checkCudaErrors(cudaMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float), cudaMemcpyDeviceToHost, plan[i].stream));
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
float sum;
//Set device
checkCudaErrors(cudaSetDevice(i));
//Wait for all operations to finish
cudaStreamSynchronize(plan[i].stream);
//Finalize GPU reduction for current subvector
sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = (float)sum;
//Shut down this GPU
checkCudaErrors(cudaFreeHost(plan[i].h_Sum_from_device));
checkCudaErrors(cudaFree(plan[i].d_Sum));
checkCudaErrors(cudaFree(plan[i].d_Data));
checkCudaErrors(cudaStreamDestroy(plan[i].stream));
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
printf(" GPU Processing time: %f (ms)\n\n", GetTimer());
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
struct timespec cpu_start, cpu_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
printf( "cpu execution time: %3.1f ms\n", result);
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaFreeHost(plan[i].h_Data));
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
4245a196ce549f75ed0279e03284f29630ea3a0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <ATen/hip/HIPContext.h>
#include <torch/torch.h>
#include <algorithm>
#include <stdexcept>
#include <stdint.h>
#include <cstdio>
#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor")
#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor")
#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor")
// just for compatibility of half precision in AT_DISPATCH_FLOATING_TYPES_AND_HALF...
static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
// requires CUDA >= 10 and ARCH >= 70
// this is very slow compared to float or __half2, and never used.
//return atomicAdd(reinterpret_cast<__half*>(address), val);
return val; // placeholder return so this never-called overload is still well-formed
}
template <typename T>
static inline __host__ __device__ T div_round_up(T val, T divisor) {
return (val + divisor - 1) / divisor;
}
template <uint32_t D>
__device__ uint32_t fast_hash(const uint32_t pos_grid[D]) {
static_assert(D <= 7, "fast_hash can only hash up to 7 dimensions.");
// While 1 is technically not a good prime for hashing (or a prime at all), it helps memory coherence
// and is sufficient for our use case of obtaining a uniformly colliding index from high-dimensional
// coordinates.
constexpr uint32_t primes[7] = { 1, 2654435761, 805459861, 3674653429, 2097192037, 1434869437, 2165219737 };
uint32_t result = 0;
#pragma unroll
for (uint32_t i = 0; i < D; ++i) {
result ^= pos_grid[i] * primes[i];
}
return result;
}
template <uint32_t D, uint32_t C>
__device__ uint32_t get_grid_index(const uint32_t gridtype, const bool align_corners, const uint32_t ch, const uint32_t hashmap_size, const uint32_t resolution, const uint32_t pos_grid[D]) {
uint32_t stride = 1;
uint32_t index = 0;
#pragma unroll
for (uint32_t d = 0; d < D && stride <= hashmap_size; d++) {
index += pos_grid[d] * stride;
stride *= align_corners ? resolution: (resolution + 1);
}
// NOTE: for NeRF, the hash is in fact not necessary. Check https://github.com/NVlabs/instant-ngp/issues/97.
// gridtype: 0 == hash, 1 == tiled
if (gridtype == 0 && stride > hashmap_size) {
index = fast_hash<D>(pos_grid);
}
return (index % hashmap_size) * C + ch;
}
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_grid(
const float * __restrict__ inputs,
const scalar_t * __restrict__ grid,
const int * __restrict__ offsets,
scalar_t * __restrict__ outputs,
const uint32_t B, const uint32_t L, const float S, const uint32_t H,
const bool calc_grad_inputs,
scalar_t * __restrict__ dy_dx,
const uint32_t gridtype,
const bool align_corners
) {
const uint32_t b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= B) return;
const uint32_t level = blockIdx.y;
// locate
grid += (uint32_t)offsets[level] * C;
inputs += b * D;
outputs += level * B * C + b * C;
// check input range (should be in [0, 1])
bool flag_oob = false;
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if (inputs[d] < 0 || inputs[d] > 1) {
flag_oob = true;
}
}
// if input out of bound, just set output to 0
if (flag_oob) {
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
outputs[ch] = 0;
}
if (calc_grad_inputs) {
dy_dx += b * D * L * C + level * D * C; // B L D C
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
dy_dx[d * C + ch] = 0;
}
}
}
return;
}
const uint32_t hashmap_size = offsets[level + 1] - offsets[level];
const float scale = exp2f(level * S) * H - 1.0f;
const uint32_t resolution = (uint32_t)ceil(scale) + 1;
// calculate coordinate
float pos[D];
uint32_t pos_grid[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
pos[d] = inputs[d] * scale + (align_corners ? 0.0f : 0.5f);
pos_grid[d] = floorf(pos[d]);
pos[d] -= (float)pos_grid[d];
}
//printf("[b=%d, l=%d] pos=(%f, %f)+(%d, %d)\n", b, level, pos[0], pos[1], pos_grid[0], pos_grid[1]);
// interpolate
scalar_t results[C] = {0}; // temp results in register
#pragma unroll
for (uint32_t idx = 0; idx < (1 << D); idx++) {
float w = 1;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if ((idx & (1 << d)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
uint32_t index = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
// writing to register (fast)
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
results[ch] += w * grid[index + ch];
}
//printf("[b=%d, l=%d] int %d, idx %d, w %f, val %f\n", b, level, idx, index, w, grid[index]);
}
// writing to global memory (slow)
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
outputs[ch] = results[ch];
}
// prepare dy_dx for calc_grad_inputs
// differentiable (soft) indexing: https://discuss.pytorch.org/t/differentiable-indexing/17647/9
if (calc_grad_inputs) {
dy_dx += b * D * L * C + level * D * C; // B L D C
#pragma unroll
for (uint32_t gd = 0; gd < D; gd++) {
scalar_t results_grad[C] = {0};
#pragma unroll
for (uint32_t idx = 0; idx < (1 << (D - 1)); idx++) {
float w = scale;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t nd = 0; nd < D - 1; nd++) {
const uint32_t d = (nd >= gd) ? (nd + 1) : nd;
if ((idx & (1 << nd)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
pos_grid_local[gd] = pos_grid[gd];
uint32_t index_left = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
pos_grid_local[gd] = pos_grid[gd] + 1;
uint32_t index_right = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
results_grad[ch] += w * (grid[index_right + ch] - grid[index_left + ch]);
}
}
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
dy_dx[gd * C + ch] = results_grad[ch];
}
}
}
}
template <typename scalar_t, uint32_t D, uint32_t C, uint32_t N_C>
__global__ void kernel_grid_backward(
const scalar_t * __restrict__ grad,
const float * __restrict__ inputs,
const scalar_t * __restrict__ grid,
const int * __restrict__ offsets,
scalar_t * __restrict__ grad_grid,
const uint32_t B, const uint32_t L, const float S, const uint32_t H,
const uint32_t gridtype,
const bool align_corners
) {
const uint32_t b = (blockIdx.x * blockDim.x + threadIdx.x) * N_C / C;
if (b >= B) return;
const uint32_t level = blockIdx.y;
const uint32_t ch = (blockIdx.x * blockDim.x + threadIdx.x) * N_C - b * C;
// locate
grad_grid += offsets[level] * C;
inputs += b * D;
grad += level * B * C + b * C + ch; // L, B, C
const uint32_t hashmap_size = offsets[level + 1] - offsets[level];
const float scale = exp2f(level * S) * H - 1.0f;
const uint32_t resolution = (uint32_t)ceil(scale) + 1;
// check input range (should be in [0, 1])
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if (inputs[d] < 0 || inputs[d] > 1) {
return; // grad is init as 0, so we simply return.
}
}
// calculate coordinate
float pos[D];
uint32_t pos_grid[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
pos[d] = inputs[d] * scale + (align_corners ? 0.0f : 0.5f);
pos_grid[d] = floorf(pos[d]);
pos[d] -= (float)pos_grid[d];
}
scalar_t grad_cur[N_C] = {0}; // fetch to register
#pragma unroll
for (uint32_t c = 0; c < N_C; c++) {
grad_cur[c] = grad[c];
}
// interpolate
#pragma unroll
for (uint32_t idx = 0; idx < (1 << D); idx++) {
float w = 1;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if ((idx & (1 << d)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
uint32_t index = get_grid_index<D, C>(gridtype, align_corners, ch, hashmap_size, resolution, pos_grid_local);
// atomicAdd for __half is slow (especially for large values), so we use __half2 if N_C % 2 == 0
// TODO: use float which is better than __half, if N_C % 2 != 0
if (std::is_same<scalar_t, at::Half>::value && N_C % 2 == 0) {
#pragma unroll
for (uint32_t c = 0; c < N_C; c += 2) {
// process two __half at once (by interpreting as a __half2)
__half2 v = {(__half)(w * grad_cur[c]), (__half)(w * grad_cur[c + 1])};
atomicAdd((__half2*)&grad_grid[index + c], v);
}
// float, or __half when N_C % 2 != 0 (which means C == 1)
} else {
#pragma unroll
for (uint32_t c = 0; c < N_C; c++) {
atomicAdd((float*)&grad_grid[index + c], (float)(w * grad_cur[c]));
}
}
}
}
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_input_backward(
const scalar_t * __restrict__ grad,
const scalar_t * __restrict__ dy_dx,
scalar_t * __restrict__ grad_inputs,
uint32_t B, uint32_t L
) {
const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x;
if (t >= B * D) return;
const uint32_t b = t / D;
const uint32_t d = t - b * D;
dy_dx += b * L * D * C;
scalar_t result = 0;
# pragma unroll
for (int l = 0; l < L; l++) {
# pragma unroll
for (int ch = 0; ch < C; ch++) {
result += grad[l * B * C + b * C + ch] * dy_dx[l * D * C + d * C + ch];
}
}
grad_inputs[t] = result;
}
template <typename scalar_t, uint32_t D>
void kernel_grid_wrapper(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners) {
static constexpr uint32_t N_THREAD = 512;
const dim3 blocks_hashgrid = { div_round_up(B, N_THREAD), L, 1 };
switch (C) {
case 1:hipLaunchKernelGGL(( kernel_grid<scalar_t, D, 1>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 2:hipLaunchKernelGGL(( kernel_grid<scalar_t, D, 2>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 4:hipLaunchKernelGGL(( kernel_grid<scalar_t, D, 4>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 8:hipLaunchKernelGGL(( kernel_grid<scalar_t, D, 8>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, or 8."};
}
}
// inputs: [B, D], float, in [0, 1]
// embeddings: [sO, C], float
// offsets: [L + 1], uint32_t
// outputs: [L, B, C], float (L first, so only one level of hashmap needs to fit into cache at a time.)
// H: base resolution
// dy_dx: [B, L * D * C]
template <typename scalar_t>
void grid_encode_forward_cuda(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners) {
switch (D) {
case 1: kernel_grid_wrapper<scalar_t, 1>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 2: kernel_grid_wrapper<scalar_t, 2>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 3: kernel_grid_wrapper<scalar_t, 3>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 4: kernel_grid_wrapper<scalar_t, 4>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 5: kernel_grid_wrapper<scalar_t, 5>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: D must be 1, 2, 3, 4, or 5."};
}
}
template <typename scalar_t, uint32_t D>
void kernel_grid_backward_wrapper(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners) {
static constexpr uint32_t N_THREAD = 256;
const uint32_t N_C = ::min(2u, C); // n_features_per_thread
const dim3 blocks_hashgrid = { div_round_up(B * C / N_C, N_THREAD), L, 1 };
switch (C) {
case 1:
hipLaunchKernelGGL(( kernel_grid_backward<scalar_t, D, 1, 1>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs)hipLaunchKernelGGL(( kernel_input_backward<scalar_t, D, 1>), dim3(div_round_up(B * D, N_THREAD)), dim3(N_THREAD), 0, 0, grad, dy_dx, grad_inputs, B, L);
break;
case 2:
hipLaunchKernelGGL(( kernel_grid_backward<scalar_t, D, 2, 2>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs)hipLaunchKernelGGL(( kernel_input_backward<scalar_t, D, 2>), dim3(div_round_up(B * D, N_THREAD)), dim3(N_THREAD), 0, 0, grad, dy_dx, grad_inputs, B, L);
break;
case 4:
hipLaunchKernelGGL(( kernel_grid_backward<scalar_t, D, 4, 2>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs)hipLaunchKernelGGL(( kernel_input_backward<scalar_t, D, 4>), dim3(div_round_up(B * D, N_THREAD)), dim3(N_THREAD), 0, 0, grad, dy_dx, grad_inputs, B, L);
break;
case 8:
hipLaunchKernelGGL(( kernel_grid_backward<scalar_t, D, 8, 2>), dim3(blocks_hashgrid), dim3(N_THREAD), 0, 0, grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs)hipLaunchKernelGGL(( kernel_input_backward<scalar_t, D, 8>), dim3(div_round_up(B * D, N_THREAD)), dim3(N_THREAD), 0, 0, grad, dy_dx, grad_inputs, B, L);
break;
default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, or 8."};
}
}
// grad: [L, B, C], float
// inputs: [B, D], float, in [0, 1]
// embeddings: [sO, C], float
// offsets: [L + 1], uint32_t
// grad_embeddings: [sO, C]
// H: base resolution
template <typename scalar_t>
void grid_encode_backward_cuda(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners) {
switch (D) {
case 1: kernel_grid_backward_wrapper<scalar_t, 1>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 2: kernel_grid_backward_wrapper<scalar_t, 2>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 3: kernel_grid_backward_wrapper<scalar_t, 3>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 4: kernel_grid_backward_wrapper<scalar_t, 4>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 5: kernel_grid_backward_wrapper<scalar_t, 5>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: D must be 1, 2, 3, 4, or 5."};
}
}
void grid_encode_forward(const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, at::Tensor dy_dx, const uint32_t gridtype, const bool align_corners) {
CHECK_CUDA(inputs);
CHECK_CUDA(embeddings);
CHECK_CUDA(offsets);
CHECK_CUDA(outputs);
CHECK_CUDA(dy_dx);
CHECK_CONTIGUOUS(inputs);
CHECK_CONTIGUOUS(embeddings);
CHECK_CONTIGUOUS(offsets);
CHECK_CONTIGUOUS(outputs);
CHECK_CONTIGUOUS(dy_dx);
CHECK_IS_FLOATING(inputs);
CHECK_IS_FLOATING(embeddings);
CHECK_IS_INT(offsets);
CHECK_IS_FLOATING(outputs);
CHECK_IS_FLOATING(dy_dx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
embeddings.scalar_type(), "grid_encode_forward", ([&] {
grid_encode_forward_cuda<scalar_t>(inputs.data_ptr<float>(), embeddings.data_ptr<scalar_t>(), offsets.data_ptr<int>(), outputs.data_ptr<scalar_t>(), B, D, C, L, S, H, calc_grad_inputs, dy_dx.data_ptr<scalar_t>(), gridtype, align_corners);
}));
}
void grid_encode_backward(const at::Tensor grad, const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, const at::Tensor dy_dx, at::Tensor grad_inputs, const uint32_t gridtype, const bool align_corners) {
CHECK_CUDA(grad);
CHECK_CUDA(inputs);
CHECK_CUDA(embeddings);
CHECK_CUDA(offsets);
CHECK_CUDA(grad_embeddings);
CHECK_CUDA(dy_dx);
CHECK_CUDA(grad_inputs);
CHECK_CONTIGUOUS(grad);
CHECK_CONTIGUOUS(inputs);
CHECK_CONTIGUOUS(embeddings);
CHECK_CONTIGUOUS(offsets);
CHECK_CONTIGUOUS(grad_embeddings);
CHECK_CONTIGUOUS(dy_dx);
CHECK_CONTIGUOUS(grad_inputs);
CHECK_IS_FLOATING(grad);
CHECK_IS_FLOATING(inputs);
CHECK_IS_FLOATING(embeddings);
CHECK_IS_INT(offsets);
CHECK_IS_FLOATING(grad_embeddings);
CHECK_IS_FLOATING(dy_dx);
CHECK_IS_FLOATING(grad_inputs);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "grid_encode_backward", ([&] {
grid_encode_backward_cuda<scalar_t>(grad.data_ptr<scalar_t>(), inputs.data_ptr<float>(), embeddings.data_ptr<scalar_t>(), offsets.data_ptr<int>(), grad_embeddings.data_ptr<scalar_t>(), B, D, C, L, S, H, calc_grad_inputs, dy_dx.data_ptr<scalar_t>(), grad_inputs.data_ptr<scalar_t>(), gridtype, align_corners);
}));
}
| 4245a196ce549f75ed0279e03284f29630ea3a0b.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/torch.h>
#include <algorithm>
#include <stdexcept>
#include <stdint.h>
#include <cstdio>
#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor")
#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor")
#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor")
// just for compatibility of half precision in AT_DISPATCH_FLOATING_TYPES_AND_HALF...
static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
// requires CUDA >= 10 and ARCH >= 70
// this is very slow compared to float or __half2, and never used.
//return atomicAdd(reinterpret_cast<__half*>(address), val);
return val; // placeholder return so this never-called overload is still well-formed
}
template <typename T>
static inline __host__ __device__ T div_round_up(T val, T divisor) {
return (val + divisor - 1) / divisor;
}
template <uint32_t D>
__device__ uint32_t fast_hash(const uint32_t pos_grid[D]) {
static_assert(D <= 7, "fast_hash can only hash up to 7 dimensions.");
// While 1 is technically not a good prime for hashing (or a prime at all), it helps memory coherence
// and is sufficient for our use case of obtaining a uniformly colliding index from high-dimensional
// coordinates.
constexpr uint32_t primes[7] = { 1, 2654435761, 805459861, 3674653429, 2097192037, 1434869437, 2165219737 };
uint32_t result = 0;
#pragma unroll
for (uint32_t i = 0; i < D; ++i) {
result ^= pos_grid[i] * primes[i];
}
return result;
}
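// Illustrative helper (not part of the original file): the D = 3 specialisation of
// fast_hash() written out explicitly, using the same per-dimension primes as above.
// It exists purely as documentation of what the loop computes.
__device__ inline uint32_t fast_hash_3d_example(uint32_t x, uint32_t y, uint32_t z) {
    return (x * 1u) ^ (y * 2654435761u) ^ (z * 805459861u);
}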
template <uint32_t D, uint32_t C>
__device__ uint32_t get_grid_index(const uint32_t gridtype, const bool align_corners, const uint32_t ch, const uint32_t hashmap_size, const uint32_t resolution, const uint32_t pos_grid[D]) {
uint32_t stride = 1;
uint32_t index = 0;
#pragma unroll
for (uint32_t d = 0; d < D && stride <= hashmap_size; d++) {
index += pos_grid[d] * stride;
stride *= align_corners ? resolution: (resolution + 1);
}
// NOTE: for NeRF, the hash is in fact not necessary. Check https://github.com/NVlabs/instant-ngp/issues/97.
// gridtype: 0 == hash, 1 == tiled
if (gridtype == 0 && stride > hashmap_size) {
index = fast_hash<D>(pos_grid);
}
return (index % hashmap_size) * C + ch;
}
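// Worked example (illustrative, not from the original file): for D = 2, C = 2,
// align_corners = true, resolution = 4 and a dense (non-hashed) level, the loop above
// yields index = x + 4 * y with the running stride ending at 16; channel ch of that cell
// is then stored at (index % hashmap_size) * 2 + ch. Only when the dense stride exceeds
// hashmap_size (and gridtype == 0, i.e. a hash level) does fast_hash() take over.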
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_grid(
const float * __restrict__ inputs,
const scalar_t * __restrict__ grid,
const int * __restrict__ offsets,
scalar_t * __restrict__ outputs,
const uint32_t B, const uint32_t L, const float S, const uint32_t H,
const bool calc_grad_inputs,
scalar_t * __restrict__ dy_dx,
const uint32_t gridtype,
const bool align_corners
) {
const uint32_t b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= B) return;
const uint32_t level = blockIdx.y;
// locate
grid += (uint32_t)offsets[level] * C;
inputs += b * D;
outputs += level * B * C + b * C;
// check input range (should be in [0, 1])
bool flag_oob = false;
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if (inputs[d] < 0 || inputs[d] > 1) {
flag_oob = true;
}
}
// if input out of bound, just set output to 0
if (flag_oob) {
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
outputs[ch] = 0;
}
if (calc_grad_inputs) {
dy_dx += b * D * L * C + level * D * C; // B L D C
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
dy_dx[d * C + ch] = 0;
}
}
}
return;
}
const uint32_t hashmap_size = offsets[level + 1] - offsets[level];
const float scale = exp2f(level * S) * H - 1.0f;
const uint32_t resolution = (uint32_t)ceil(scale) + 1;
// calculate coordinate
float pos[D];
uint32_t pos_grid[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
pos[d] = inputs[d] * scale + (align_corners ? 0.0f : 0.5f);
pos_grid[d] = floorf(pos[d]);
pos[d] -= (float)pos_grid[d];
}
//printf("[b=%d, l=%d] pos=(%f, %f)+(%d, %d)\n", b, level, pos[0], pos[1], pos_grid[0], pos_grid[1]);
// interpolate
scalar_t results[C] = {0}; // temp results in register
#pragma unroll
for (uint32_t idx = 0; idx < (1 << D); idx++) {
float w = 1;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if ((idx & (1 << d)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
uint32_t index = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
// writing to register (fast)
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
results[ch] += w * grid[index + ch];
}
//printf("[b=%d, l=%d] int %d, idx %d, w %f, val %f\n", b, level, idx, index, w, grid[index]);
}
// writing to global memory (slow)
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
outputs[ch] = results[ch];
}
// prepare dy_dx for calc_grad_inputs
// differentiable (soft) indexing: https://discuss.pytorch.org/t/differentiable-indexing/17647/9
if (calc_grad_inputs) {
dy_dx += b * D * L * C + level * D * C; // B L D C
#pragma unroll
for (uint32_t gd = 0; gd < D; gd++) {
scalar_t results_grad[C] = {0};
#pragma unroll
for (uint32_t idx = 0; idx < (1 << (D - 1)); idx++) {
float w = scale;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t nd = 0; nd < D - 1; nd++) {
const uint32_t d = (nd >= gd) ? (nd + 1) : nd;
if ((idx & (1 << nd)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
pos_grid_local[gd] = pos_grid[gd];
uint32_t index_left = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
pos_grid_local[gd] = pos_grid[gd] + 1;
uint32_t index_right = get_grid_index<D, C>(gridtype, align_corners, 0, hashmap_size, resolution, pos_grid_local);
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
results_grad[ch] += w * (grid[index_right + ch] - grid[index_left + ch]);
}
}
#pragma unroll
for (uint32_t ch = 0; ch < C; ch++) {
dy_dx[gd * C + ch] = results_grad[ch];
}
}
}
}
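// Illustrative note (not part of the original file): for D = 2 the (1 << D)-corner loop
// above is plain bilinear interpolation. With in-cell offsets (px, py), the corner
// weights are
//   w00 = (1 - px) * (1 - py),  w10 = px * (1 - py),
//   w01 = (1 - px) * py,        w11 = px * py,
// which sum to 1, so `results` is a convex combination of the C features stored at the
// four surrounding grid vertices.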
template <typename scalar_t, uint32_t D, uint32_t C, uint32_t N_C>
__global__ void kernel_grid_backward(
const scalar_t * __restrict__ grad,
const float * __restrict__ inputs,
const scalar_t * __restrict__ grid,
const int * __restrict__ offsets,
scalar_t * __restrict__ grad_grid,
const uint32_t B, const uint32_t L, const float S, const uint32_t H,
const uint32_t gridtype,
const bool align_corners
) {
const uint32_t b = (blockIdx.x * blockDim.x + threadIdx.x) * N_C / C;
if (b >= B) return;
const uint32_t level = blockIdx.y;
const uint32_t ch = (blockIdx.x * blockDim.x + threadIdx.x) * N_C - b * C;
// locate
grad_grid += offsets[level] * C;
inputs += b * D;
grad += level * B * C + b * C + ch; // L, B, C
const uint32_t hashmap_size = offsets[level + 1] - offsets[level];
const float scale = exp2f(level * S) * H - 1.0f;
const uint32_t resolution = (uint32_t)ceil(scale) + 1;
// check input range (should be in [0, 1])
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if (inputs[d] < 0 || inputs[d] > 1) {
return; // grad is init as 0, so we simply return.
}
}
// calculate coordinate
float pos[D];
uint32_t pos_grid[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
pos[d] = inputs[d] * scale + (align_corners ? 0.0f : 0.5f);
pos_grid[d] = floorf(pos[d]);
pos[d] -= (float)pos_grid[d];
}
scalar_t grad_cur[N_C] = {0}; // fetch to register
#pragma unroll
for (uint32_t c = 0; c < N_C; c++) {
grad_cur[c] = grad[c];
}
// interpolate
#pragma unroll
for (uint32_t idx = 0; idx < (1 << D); idx++) {
float w = 1;
uint32_t pos_grid_local[D];
#pragma unroll
for (uint32_t d = 0; d < D; d++) {
if ((idx & (1 << d)) == 0) {
w *= 1 - pos[d];
pos_grid_local[d] = pos_grid[d];
} else {
w *= pos[d];
pos_grid_local[d] = pos_grid[d] + 1;
}
}
uint32_t index = get_grid_index<D, C>(gridtype, align_corners, ch, hashmap_size, resolution, pos_grid_local);
// atomicAdd for __half is slow (especially for large values), so we use __half2 if N_C % 2 == 0
// TODO: use float which is better than __half, if N_C % 2 != 0
if (std::is_same<scalar_t, at::Half>::value && N_C % 2 == 0) {
#pragma unroll
for (uint32_t c = 0; c < N_C; c += 2) {
// process two __half at once (by interpreting as a __half2)
__half2 v = {(__half)(w * grad_cur[c]), (__half)(w * grad_cur[c + 1])};
atomicAdd((__half2*)&grad_grid[index + c], v);
}
// float, or __half when N_C % 2 != 0 (which means C == 1)
} else {
#pragma unroll
for (uint32_t c = 0; c < N_C; c++) {
atomicAdd((float*)&grad_grid[index + c], (float)(w * grad_cur[c]));
}
}
}
}
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_input_backward(
const scalar_t * __restrict__ grad,
const scalar_t * __restrict__ dy_dx,
scalar_t * __restrict__ grad_inputs,
uint32_t B, uint32_t L
) {
const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x;
if (t >= B * D) return;
const uint32_t b = t / D;
const uint32_t d = t - b * D;
dy_dx += b * L * D * C;
scalar_t result = 0;
# pragma unroll
for (int l = 0; l < L; l++) {
# pragma unroll
for (int ch = 0; ch < C; ch++) {
result += grad[l * B * C + b * C + ch] * dy_dx[l * D * C + d * C + ch];
}
}
grad_inputs[t] = result;
}
template <typename scalar_t, uint32_t D>
void kernel_grid_wrapper(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners) {
static constexpr uint32_t N_THREAD = 512;
const dim3 blocks_hashgrid = { div_round_up(B, N_THREAD), L, 1 };
switch (C) {
case 1: kernel_grid<scalar_t, D, 1><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 2: kernel_grid<scalar_t, D, 2><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 4: kernel_grid<scalar_t, D, 4><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 8: kernel_grid<scalar_t, D, 8><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, outputs, B, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, or 8."};
}
}
// inputs: [B, D], float, in [0, 1]
// embeddings: [sO, C], float
// offsets: [L + 1], uint32_t
// outputs: [L, B, C], float (L first, so only one level of hashmap needs to fit into cache at a time.)
// H: base resolution
// dy_dx: [B, L * D * C]
template <typename scalar_t>
void grid_encode_forward_cuda(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners) {
switch (D) {
case 1: kernel_grid_wrapper<scalar_t, 1>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 2: kernel_grid_wrapper<scalar_t, 2>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 3: kernel_grid_wrapper<scalar_t, 3>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 4: kernel_grid_wrapper<scalar_t, 4>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
case 5: kernel_grid_wrapper<scalar_t, 5>(inputs, embeddings, offsets, outputs, B, C, L, S, H, calc_grad_inputs, dy_dx, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: D must be 1, 2, 3, 4, or 5."};
}
}
template <typename scalar_t, uint32_t D>
void kernel_grid_backward_wrapper(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners) {
static constexpr uint32_t N_THREAD = 256;
const uint32_t N_C = std::min(2u, C); // n_features_per_thread
const dim3 blocks_hashgrid = { div_round_up(B * C / N_C, N_THREAD), L, 1 };
switch (C) {
case 1:
kernel_grid_backward<scalar_t, D, 1, 1><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs) kernel_input_backward<scalar_t, D, 1><<<div_round_up(B * D, N_THREAD), N_THREAD>>>(grad, dy_dx, grad_inputs, B, L);
break;
case 2:
kernel_grid_backward<scalar_t, D, 2, 2><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs) kernel_input_backward<scalar_t, D, 2><<<div_round_up(B * D, N_THREAD), N_THREAD>>>(grad, dy_dx, grad_inputs, B, L);
break;
case 4:
kernel_grid_backward<scalar_t, D, 4, 2><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs) kernel_input_backward<scalar_t, D, 4><<<div_round_up(B * D, N_THREAD), N_THREAD>>>(grad, dy_dx, grad_inputs, B, L);
break;
case 8:
kernel_grid_backward<scalar_t, D, 8, 2><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners);
if (calc_grad_inputs) kernel_input_backward<scalar_t, D, 8><<<div_round_up(B * D, N_THREAD), N_THREAD>>>(grad, dy_dx, grad_inputs, B, L);
break;
default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, or 8."};
}
}
// grad: [L, B, C], float
// inputs: [B, D], float, in [0, 1]
// embeddings: [sO, C], float
// offsets: [L + 1], uint32_t
// grad_embeddings: [sO, C]
// H: base resolution
template <typename scalar_t>
void grid_encode_backward_cuda(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners) {
switch (D) {
case 1: kernel_grid_backward_wrapper<scalar_t, 1>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 2: kernel_grid_backward_wrapper<scalar_t, 2>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 3: kernel_grid_backward_wrapper<scalar_t, 3>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 4: kernel_grid_backward_wrapper<scalar_t, 4>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
case 5: kernel_grid_backward_wrapper<scalar_t, 5>(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, S, H, calc_grad_inputs, dy_dx, grad_inputs, gridtype, align_corners); break;
default: throw std::runtime_error{"GridEncoding: D must be 1, 2, 3, 4, or 5."};
}
}
void grid_encode_forward(const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, at::Tensor dy_dx, const uint32_t gridtype, const bool align_corners) {
CHECK_CUDA(inputs);
CHECK_CUDA(embeddings);
CHECK_CUDA(offsets);
CHECK_CUDA(outputs);
CHECK_CUDA(dy_dx);
CHECK_CONTIGUOUS(inputs);
CHECK_CONTIGUOUS(embeddings);
CHECK_CONTIGUOUS(offsets);
CHECK_CONTIGUOUS(outputs);
CHECK_CONTIGUOUS(dy_dx);
CHECK_IS_FLOATING(inputs);
CHECK_IS_FLOATING(embeddings);
CHECK_IS_INT(offsets);
CHECK_IS_FLOATING(outputs);
CHECK_IS_FLOATING(dy_dx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
embeddings.scalar_type(), "grid_encode_forward", ([&] {
grid_encode_forward_cuda<scalar_t>(inputs.data_ptr<float>(), embeddings.data_ptr<scalar_t>(), offsets.data_ptr<int>(), outputs.data_ptr<scalar_t>(), B, D, C, L, S, H, calc_grad_inputs, dy_dx.data_ptr<scalar_t>(), gridtype, align_corners);
}));
}
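// Minimal host-side usage sketch (hypothetical, not part of the original file): every
// tensor name and size below is invented for illustration; only the shapes follow the
// comments above ([B, D] inputs, [offsets[L], C] embeddings, [L + 1] int offsets,
// [L, B, C] outputs, [B, L * D * C] dy_dx). Kept as a comment so it does not affect the
// build.
//
//   const uint32_t B = 4096, D = 3, C = 2, L = 16, H = 16;
//   const float S = 0.5f; // per-level log2 scale factor, chosen arbitrarily here
//   auto fopts = torch::dtype(torch::kFloat32).device(torch::kCUDA);
//   auto inputs = torch::rand({B, D}, fopts); // coordinates in [0, 1]
//   auto offsets = make_level_offsets(L); // [L + 1] int32 CUDA tensor (user-provided helper)
//   auto embeddings = torch::zeros({offsets[L].item<int>(), C}, fopts);
//   auto outputs = torch::empty({L, B, C}, fopts);
//   auto dy_dx = torch::empty({B, L * D * C}, fopts);
//   grid_encode_forward(inputs, embeddings, offsets, outputs, B, D, C, L, S, H,
//                       /*calc_grad_inputs=*/false, dy_dx, /*gridtype=*/0,
//                       /*align_corners=*/false);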
void grid_encode_backward(const at::Tensor grad, const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const bool calc_grad_inputs, const at::Tensor dy_dx, at::Tensor grad_inputs, const uint32_t gridtype, const bool align_corners) {
CHECK_CUDA(grad);
CHECK_CUDA(inputs);
CHECK_CUDA(embeddings);
CHECK_CUDA(offsets);
CHECK_CUDA(grad_embeddings);
CHECK_CUDA(dy_dx);
CHECK_CUDA(grad_inputs);
CHECK_CONTIGUOUS(grad);
CHECK_CONTIGUOUS(inputs);
CHECK_CONTIGUOUS(embeddings);
CHECK_CONTIGUOUS(offsets);
CHECK_CONTIGUOUS(grad_embeddings);
CHECK_CONTIGUOUS(dy_dx);
CHECK_CONTIGUOUS(grad_inputs);
CHECK_IS_FLOATING(grad);
CHECK_IS_FLOATING(inputs);
CHECK_IS_FLOATING(embeddings);
CHECK_IS_INT(offsets);
CHECK_IS_FLOATING(grad_embeddings);
CHECK_IS_FLOATING(dy_dx);
CHECK_IS_FLOATING(grad_inputs);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "grid_encode_backward", ([&] {
grid_encode_backward_cuda<scalar_t>(grad.data_ptr<scalar_t>(), inputs.data_ptr<float>(), embeddings.data_ptr<scalar_t>(), offsets.data_ptr<int>(), grad_embeddings.data_ptr<scalar_t>(), B, D, C, L, S, H, calc_grad_inputs, dy_dx.data_ptr<scalar_t>(), grad_inputs.data_ptr<scalar_t>(), gridtype, align_corners);
}));
}
|
85d3bc5602271766a0503017d31a719210561cef.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_functions.h>
#include "bbob_generators.cuh"
__device__ double fitness_function(double x[], int number_of_variables, double* xopt, double fopt, double *rot1, double *rot2)
{
const double condition = 100;
const double alpha = 10.0;
size_t i, j;
double penalty = 0.0, x1;
double result;
double tempx[MAX_DIMENSIONS];
double tempxx[MAX_DIMENSIONS];
double* current_row;
for(i = 0; i < number_of_variables; ++i)
{
double tmp;
tmp = fabs(x[i]) - 5.0;
if(tmp > 0.0)
penalty += tmp * tmp;
}
for(i = 0; i < number_of_variables; ++i)
{
double c1;
tempx[i] = 0.0;
current_row = rot2 + i * number_of_variables;
c1 = sqrt(pow(condition / 10., (double)i / (double)(number_of_variables - 1)));
for(j = 0; j < number_of_variables; ++j)
{
tempx[i] += c1 * current_row[j] * (x[j] - xopt[j]);
}
}
x1 = tempx[0];
for(i = 0; i < number_of_variables; ++i)
{
if(fabs(tempx[i]) > 0.5)
tempx[i] = coco_double_round(tempx[i]);
else
tempx[i] = coco_double_round(alpha * tempx[i]) / alpha;
}
for(i = 0; i < number_of_variables; ++i)
{
tempxx[i] = 0.0;
current_row = rot1 + i * number_of_variables;
for(j = 0; j < number_of_variables; ++j)
{
tempxx[i] += current_row[j] * tempx[j];
}
}
/* Computation core */
result = 0.0;
for(i = 0; i < number_of_variables; ++i)
{
double exponent;
exponent = (double)(long)i / ((double)(long)number_of_variables - 1.0);
result += pow(condition, exponent) * tempxx[i] * tempxx[i];
;
}
result = 0.1 * coco_double_max(fabs(x1) * 1.0e-4, result) + penalty + fopt;
return result;
}
__device__ double wrapped_fitness_function(double x[], int number_of_variables,
double* xopt, double* rot1, double* rot2, double fopt)
{
double temp[1];
temp[0] = fitness_function(x, number_of_variables, xopt, fopt, rot1, rot2);
return temp[0];
}
extern "C" {
__global__ void generateData(int dimension,
int rseed,
int function,
int instance,
double* rot1,
double* rot2,
double* vars_shift_xopt,
double* obj_shift_fopt)
{
bbob2009_compute_xopt(vars_shift_xopt, rseed, dimension);
obj_shift_fopt[0] = bbob2009_compute_fopt(function, instance);
double rot1d[MAX_DIMENSIONS][MAX_DIMENSIONS];
double rot2d[MAX_DIMENSIONS][MAX_DIMENSIONS];
bbob2009_compute_rotation(dimension, rot1d, rseed + 1000000);
bbob2009_compute_rotation(dimension, rot2d, rseed);
double *current_row_1;
double *current_row_2;
for(int i = 0; i < dimension; ++i)
{
current_row_1 = rot1 + i * dimension;
current_row_2 = rot2 + i * dimension;
for(int j = 0; j < dimension; ++j)
{
current_row_1[j] = rot1d[i][j];
current_row_2[j] = rot2d[i][j];
}
}
}
__global__ void transposeKernel(
double* positions,
double* velocities,
double* personalBests,
double* personalBestValues,
int particlesCount,
int dimensionsCount,
double* xopt, double* rot1, double* rot2, double fopt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount) return;
double* particleLoc = positions + i * dimensionsCount;
double* particleVel = velocities + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
{
particleLoc[i] += particleVel[i];
}
clamp(particleLoc, dimensionsCount, -5.0, 5.0);
double tempLocation[MAX_DIMENSIONS];
for(int i = 0; i < dimensionsCount; i++)
{
tempLocation[i] = particleLoc[i];
}
double newValue = wrapped_fitness_function(tempLocation, dimensionsCount, xopt, rot1, rot2, fopt);
if(newValue < personalBestValues[i])
{
personalBestValues[i] = newValue;
double* particlePersonalBest = personalBests + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
particlePersonalBest[i] = particleLoc[i];
}
}
} | 85d3bc5602271766a0503017d31a719210561cef.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <math_functions.h>
#include "bbob_generators.cuh"
__device__ double fitness_function(double x[], int number_of_variables, double* xopt, double fopt, double *rot1, double *rot2)
{
const double condition = 100;
const double alpha = 10.0;
size_t i, j;
double penalty = 0.0, x1;
double result;
double tempx[MAX_DIMENSIONS];
double tempxx[MAX_DIMENSIONS];
double* current_row;
for(i = 0; i < number_of_variables; ++i)
{
double tmp;
tmp = fabs(x[i]) - 5.0;
if(tmp > 0.0)
penalty += tmp * tmp;
}
for(i = 0; i < number_of_variables; ++i)
{
double c1;
tempx[i] = 0.0;
current_row = rot2 + i * number_of_variables;
c1 = sqrt(pow(condition / 10., (double)i / (double)(number_of_variables - 1)));
for(j = 0; j < number_of_variables; ++j)
{
tempx[i] += c1 * current_row[j] * (x[j] - xopt[j]);
}
}
x1 = tempx[0];
for(i = 0; i < number_of_variables; ++i)
{
if(fabs(tempx[i]) > 0.5)
tempx[i] = coco_double_round(tempx[i]);
else
tempx[i] = coco_double_round(alpha * tempx[i]) / alpha;
}
for(i = 0; i < number_of_variables; ++i)
{
tempxx[i] = 0.0;
current_row = rot1 + i * number_of_variables;
for(j = 0; j < number_of_variables; ++j)
{
tempxx[i] += current_row[j] * tempx[j];
}
}
/* Computation core */
result = 0.0;
for(i = 0; i < number_of_variables; ++i)
{
double exponent;
exponent = (double)(long)i / ((double)(long)number_of_variables - 1.0);
result += pow(condition, exponent) * tempxx[i] * tempxx[i];
;
}
result = 0.1 * coco_double_max(fabs(x1) * 1.0e-4, result) + penalty + fopt;
return result;
}
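// Note (illustrative interpretation, not stated in this file): the function above follows
// a BBOB "step ellipsoid"-style construction: rotate/pre-condition x, snap each
// coordinate to a step grid (finer steps via alpha = 10 when |tempx[i]| <= 0.5), rotate
// again, accumulate sum_i condition^(i/(n-1)) * z_i^2, and return
// 0.1 * max(|first rotated coordinate| * 1e-4, sum) + boundary penalty + fopt.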
__device__ double wrapped_fitness_function(double x[], int number_of_variables,
double* xopt, double* rot1, double* rot2, double fopt)
{
double temp[1];
temp[0] = fitness_function(x, number_of_variables, xopt, fopt, rot1, rot2);
return temp[0];
}
extern "C" {
__global__ void generateData(int dimension,
int rseed,
int function,
int instance,
double* rot1,
double* rot2,
double* vars_shift_xopt,
double* obj_shift_fopt)
{
bbob2009_compute_xopt(vars_shift_xopt, rseed, dimension);
obj_shift_fopt[0] = bbob2009_compute_fopt(function, instance);
double rot1d[MAX_DIMENSIONS][MAX_DIMENSIONS];
double rot2d[MAX_DIMENSIONS][MAX_DIMENSIONS];
bbob2009_compute_rotation(dimension, rot1d, rseed + 1000000);
bbob2009_compute_rotation(dimension, rot2d, rseed);
double *current_row_1;
double *current_row_2;
for(int i = 0; i < dimension; ++i)
{
current_row_1 = rot1 + i * dimension;
current_row_2 = rot2 + i * dimension;
for(int j = 0; j < dimension; ++j)
{
current_row_1[j] = rot1d[i][j];
current_row_2[j] = rot2d[i][j];
}
}
}
__global__ void transposeKernel(
double* positions,
double* velocities,
double* personalBests,
double* personalBestValues,
int particlesCount,
int dimensionsCount,
double* xopt, double* rot1, double* rot2, double fopt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount) return;
double* particleLoc = positions + i * dimensionsCount;
double* particleVel = velocities + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
{
particleLoc[i] += particleVel[i];
}
clamp(particleLoc, dimensionsCount, -5.0, 5.0);
double tempLocation[MAX_DIMENSIONS];
for(int i = 0; i < dimensionsCount; i++)
{
tempLocation[i] = particleLoc[i];
}
double newValue = wrapped_fitness_function(tempLocation, dimensionsCount, xopt, rot1, rot2, fopt);
if(newValue < personalBestValues[i])
{
personalBestValues[i] = newValue;
double* particlePersonalBest = personalBests + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
particlePersonalBest[i] = particleLoc[i];
}
}
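// Illustrative summary (not part of the original file): per particle this kernel performs
// the position half of a particle swarm step on the BBOB box [-5, 5]^d:
//   x <- clamp(x + v, -5, 5); if f(x) < pbest_value then update pbest_value and pbest.
// The velocity update itself is assumed to live in a separate kernel.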
} |
2014fe38bd4fd37c3bf048f0be0554f5518c7ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <cstring>
#include "blas.h"
#include "dark_cuda.h"
#include "dropout_layer.h"
#include "image.h"
#include "image_opencv.h"
#include "utils.h"
__global__ void dropblock_fast_kernel(float* rand, float prob, int w, int h,
int spatial, int filters, int batch, int block_size,
float* drop_blocks_scale, float* output)
{
const int threads = BLOCK;
const int id = threadIdx.x;
const int f = blockIdx.x % filters;
const int b = blockIdx.x / filters;
__shared__ int prob_block;
__shared__ int index_block;
if (id == 0)
{
prob_block = 1.0 * 1000000;
index_block = -1;
}
__syncthreads();
int i;
for (i = id; i < spatial; i += threads)
{
    int index = b * filters * spatial + f * spatial + i;  // linear index of spatial element i in filter f of batch b
if (rand[index] < prob)
{
      // Choose the position with the lowest rand value below prob
int new_val = rand[index] * 1000000;
rand[index] = 1;
int old_val = atomicMin(&prob_block, new_val);
if (new_val < old_val)
{
index_block = i;
// if (b == 0) printf("\n rand[i] = %f, prob = %f, b = %d, f = %d, i =
// %d, index_block = %d \n", rand[i], prob, b, f, i, index_block);
}
}
}
__syncthreads();
if (index_block == -1)
return;
int b_x = index_block % w;
int b_y = index_block / w;
if (b_x > (w - block_size))
b_x = b_x - (w - block_size);
if (b_y > (h - block_size))
b_y = b_y - (h - block_size);
b_x = max(0, min(b_x, w - block_size));
b_y = max(0, min(b_y, h - block_size));
int block_square_size = block_size * block_size;
for (i = id; i < block_square_size; i += threads)
{
int i_x = i % block_size;
int i_y = i / block_size;
int x = b_x + i_x;
int y = b_y + i_y;
if (x >= 0 && x < w && y >= 0 && y < h)
{
int new_index = b * filters * spatial + f * spatial + y * w + x;
output[new_index] = 0;
rand[new_index] = 0;
}
}
// if (id == 0 && b == 0) printf(" f = %d, b = %d \n", f, b);
if (id == 0 && drop_blocks_scale)
{
atomicAdd(&drop_blocks_scale[b], block_square_size);
// if(b == 0) printf("\n index_block = %d \n", index_block);
}
}
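// Turns the per-batch count of dropped pixels accumulated above into the
// inverted-dropout rescale factor 1 / (1 - dropped_fraction).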
__global__ void set_scales_dropblock_kernel(float* drop_blocks_scale,
int block_size_w, int block_size_h, int outputs, int batch)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= batch)
return;
// printf(" drop_blocks_scale[index] = %f \n", drop_blocks_scale[index]);
const float prob = drop_blocks_scale[index] / (float)outputs;
const float scale = 1.0f / (1.0f - prob);
drop_blocks_scale[index] = scale;
}
__global__ void scale_dropblock_kernel(
float* output, int size, int outputs, float* drop_blocks_scale)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
const int b = index / outputs;
output[index] *= drop_blocks_scale[b];
}
__global__ void backward_dropblock_kernel(float* pass, float* delta, int size)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
if (pass[index] == 0)
delta[index] = 0;
}
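// Standard inverted dropout: zero an element when its random value falls
// below prob, otherwise multiply it by scale (typically 1 / (1 - prob)).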
__global__ void yoloswag420blazeit360noscope(
float* input, int size, float* rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id < size)
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
void ForwardDropoutLayerGpu(layer* l, NetworkState state)
{
if (!state.train)
return;
int iteration_num = GetCurrIter(state.net);
  // We gradually increase the block size and the dropout probability during
  // the first 85% of training (the multiplier ramps from 0 to 1)
float multiplier = 1.0;
if (iteration_num < (state.net->max_iter * 0.85))
multiplier = (iteration_num / (float)(state.net->max_iter * 0.85));
// dropblock
if (l->dropblock)
{
const float cur_prob = l->probability * multiplier;
int block_width = l->dropblock_size_abs * multiplier;
int block_height = l->dropblock_size_abs * multiplier;
if (l->dropblock_size_rel)
{
block_width = l->dropblock_size_rel * l->w * multiplier;
block_height = l->dropblock_size_rel * l->h * multiplier;
}
block_width = max_val_cmp(1, block_width);
block_height = max_val_cmp(1, block_height);
block_width = min_val_cmp(l->w, block_width);
block_height = min_val_cmp(l->h, block_height);
const int block_size = min_val_cmp(block_width, block_height);
const float block_prob = cur_prob / (block_size * block_size);
assert(block_size <= l->w && block_size <= l->h);
const int size = l->inputs * l->batch;
cuda_random(l->rand_gpu, size);
fill_ongpu(l->batch, 0, l->drop_blocks_scale_gpu, 1);
int num_blocks = l->batch * l->c;
hipLaunchKernelGGL(( dropblock_fast_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(),
l->rand_gpu, block_prob, l->w, l->h, l->w * l->h, l->c, l->batch,
block_size, l->drop_blocks_scale_gpu, state.input);
CHECK_CUDA(hipPeekAtLastError());
num_blocks = get_number_of_blocks(l->batch, BLOCK);
hipLaunchKernelGGL(( set_scales_dropblock_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(),
l->drop_blocks_scale_gpu, block_size, block_size, l->outputs, l->batch);
CHECK_CUDA(hipPeekAtLastError());
num_blocks = get_number_of_blocks(l->outputs * l->batch, BLOCK);
hipLaunchKernelGGL(( scale_dropblock_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(),
state.input, l->outputs * l->batch, l->outputs,
l->drop_blocks_scale_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
// dropout
else
{
int size = l->inputs * l->batch;
cuda_random(l->rand_gpu, size);
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0,
get_cuda_stream(),
state.input, size, l->rand_gpu, l->probability, l->scale);
CHECK_CUDA(hipPeekAtLastError());
}
}
void BackwardDropoutLayerGpu(layer* l, NetworkState state)
{
if (!state.delta)
return;
const int size = l->inputs * l->batch;
// dropblock
if (l->dropblock)
{
int iteration_num = GetCurrIter(state.net);
float multiplier = 1.0;
if (iteration_num < (state.net->max_iter * 0.85))
multiplier = (iteration_num / (float)(state.net->max_iter * 0.85));
int block_width = l->dropblock_size_abs * multiplier;
int block_height = l->dropblock_size_abs * multiplier;
if (l->dropblock_size_rel)
{
block_width = l->dropblock_size_rel * l->w * multiplier;
block_height = l->dropblock_size_rel * l->h * multiplier;
}
block_width = max_val_cmp(1, block_width);
block_height = max_val_cmp(1, block_height);
block_width = min_val_cmp(l->w, block_width);
block_height = min_val_cmp(l->h, block_height);
int num_blocks = get_number_of_blocks(l->outputs * l->batch, BLOCK);
hipLaunchKernelGGL(( backward_dropblock_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(),
l->rand_gpu, state.delta, l->outputs * l->batch);
CHECK_CUDA(hipPeekAtLastError());
hipLaunchKernelGGL(( scale_dropblock_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(),
state.delta, l->outputs * l->batch, l->outputs,
l->drop_blocks_scale_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
// dropout
else
{
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0,
get_cuda_stream(),
state.delta, size, l->rand_gpu, l->probability, l->scale);
CHECK_CUDA(hipPeekAtLastError());
}
}
| 2014fe38bd4fd37c3bf048f0be0554f5518c7ba0.cu | #include <cublas_v2.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cstring>
#include "blas.h"
#include "dark_cuda.h"
#include "dropout_layer.h"
#include "image.h"
#include "image_opencv.h"
#include "utils.h"
__global__ void dropblock_fast_kernel(float* rand, float prob, int w, int h,
int spatial, int filters, int batch, int block_size,
float* drop_blocks_scale, float* output)
{
const int threads = BLOCK;
const int id = threadIdx.x;
const int f = blockIdx.x % filters;
const int b = blockIdx.x / filters;
__shared__ int prob_block;
__shared__ int index_block;
if (id == 0)
{
prob_block = 1.0 * 1000000;
index_block = -1;
}
__syncthreads();
int i;
for (i = id; i < spatial; i += threads)
{
    int index = b * filters * spatial + f * spatial + i;  // linear index of spatial element i in filter f of batch b
if (rand[index] < prob)
{
      // Choose the position with the lowest rand value below prob
int new_val = rand[index] * 1000000;
rand[index] = 1;
int old_val = atomicMin(&prob_block, new_val);
if (new_val < old_val)
{
index_block = i;
// if (b == 0) printf("\n rand[i] = %f, prob = %f, b = %d, f = %d, i =
// %d, index_block = %d \n", rand[i], prob, b, f, i, index_block);
}
}
}
__syncthreads();
if (index_block == -1)
return;
int b_x = index_block % w;
int b_y = index_block / w;
if (b_x > (w - block_size))
b_x = b_x - (w - block_size);
if (b_y > (h - block_size))
b_y = b_y - (h - block_size);
b_x = max(0, min(b_x, w - block_size));
b_y = max(0, min(b_y, h - block_size));
int block_square_size = block_size * block_size;
for (i = id; i < block_square_size; i += threads)
{
int i_x = i % block_size;
int i_y = i / block_size;
int x = b_x + i_x;
int y = b_y + i_y;
if (x >= 0 && x < w && y >= 0 && y < h)
{
int new_index = b * filters * spatial + f * spatial + y * w + x;
output[new_index] = 0;
rand[new_index] = 0;
}
}
// if (id == 0 && b == 0) printf(" f = %d, b = %d \n", f, b);
if (id == 0 && drop_blocks_scale)
{
atomicAdd(&drop_blocks_scale[b], block_square_size);
// if(b == 0) printf("\n index_block = %d \n", index_block);
}
}
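// Turns the per-batch count of dropped pixels accumulated above into the
// inverted-dropout rescale factor 1 / (1 - dropped_fraction).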
__global__ void set_scales_dropblock_kernel(float* drop_blocks_scale,
int block_size_w, int block_size_h, int outputs, int batch)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= batch)
return;
// printf(" drop_blocks_scale[index] = %f \n", drop_blocks_scale[index]);
const float prob = drop_blocks_scale[index] / (float)outputs;
const float scale = 1.0f / (1.0f - prob);
drop_blocks_scale[index] = scale;
}
__global__ void scale_dropblock_kernel(
float* output, int size, int outputs, float* drop_blocks_scale)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
const int b = index / outputs;
output[index] *= drop_blocks_scale[b];
}
__global__ void backward_dropblock_kernel(float* pass, float* delta, int size)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size)
return;
if (pass[index] == 0)
delta[index] = 0;
}
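// Standard inverted dropout: zero an element when its random value falls
// below prob, otherwise multiply it by scale (typically 1 / (1 - prob)).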
__global__ void yoloswag420blazeit360noscope(
float* input, int size, float* rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id < size)
input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
}
void ForwardDropoutLayerGpu(layer* l, NetworkState state)
{
if (!state.train)
return;
int iteration_num = GetCurrIter(state.net);
  // We gradually increase the block size and the dropout probability during
  // the first 85% of training (the multiplier ramps from 0 to 1)
float multiplier = 1.0;
if (iteration_num < (state.net->max_iter * 0.85))
multiplier = (iteration_num / (float)(state.net->max_iter * 0.85));
// dropblock
if (l->dropblock)
{
const float cur_prob = l->probability * multiplier;
int block_width = l->dropblock_size_abs * multiplier;
int block_height = l->dropblock_size_abs * multiplier;
if (l->dropblock_size_rel)
{
block_width = l->dropblock_size_rel * l->w * multiplier;
block_height = l->dropblock_size_rel * l->h * multiplier;
}
block_width = max_val_cmp(1, block_width);
block_height = max_val_cmp(1, block_height);
block_width = min_val_cmp(l->w, block_width);
block_height = min_val_cmp(l->h, block_height);
const int block_size = min_val_cmp(block_width, block_height);
const float block_prob = cur_prob / (block_size * block_size);
assert(block_size <= l->w && block_size <= l->h);
const int size = l->inputs * l->batch;
cuda_random(l->rand_gpu, size);
fill_ongpu(l->batch, 0, l->drop_blocks_scale_gpu, 1);
int num_blocks = l->batch * l->c;
dropblock_fast_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>(
l->rand_gpu, block_prob, l->w, l->h, l->w * l->h, l->c, l->batch,
block_size, l->drop_blocks_scale_gpu, state.input);
CHECK_CUDA(cudaPeekAtLastError());
num_blocks = get_number_of_blocks(l->batch, BLOCK);
set_scales_dropblock_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>(
l->drop_blocks_scale_gpu, block_size, block_size, l->outputs, l->batch);
CHECK_CUDA(cudaPeekAtLastError());
num_blocks = get_number_of_blocks(l->outputs * l->batch, BLOCK);
scale_dropblock_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>(
state.input, l->outputs * l->batch, l->outputs,
l->drop_blocks_scale_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
// dropout
else
{
int size = l->inputs * l->batch;
cuda_random(l->rand_gpu, size);
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK, 0,
get_cuda_stream()>>>(
state.input, size, l->rand_gpu, l->probability, l->scale);
CHECK_CUDA(cudaPeekAtLastError());
}
}
void BackwardDropoutLayerGpu(layer* l, NetworkState state)
{
if (!state.delta)
return;
const int size = l->inputs * l->batch;
// dropblock
if (l->dropblock)
{
int iteration_num = GetCurrIter(state.net);
float multiplier = 1.0;
if (iteration_num < (state.net->max_iter * 0.85))
multiplier = (iteration_num / (float)(state.net->max_iter * 0.85));
int block_width = l->dropblock_size_abs * multiplier;
int block_height = l->dropblock_size_abs * multiplier;
if (l->dropblock_size_rel)
{
block_width = l->dropblock_size_rel * l->w * multiplier;
block_height = l->dropblock_size_rel * l->h * multiplier;
}
block_width = max_val_cmp(1, block_width);
block_height = max_val_cmp(1, block_height);
block_width = min_val_cmp(l->w, block_width);
block_height = min_val_cmp(l->h, block_height);
int num_blocks = get_number_of_blocks(l->outputs * l->batch, BLOCK);
backward_dropblock_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>(
l->rand_gpu, state.delta, l->outputs * l->batch);
CHECK_CUDA(cudaPeekAtLastError());
scale_dropblock_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>(
state.delta, l->outputs * l->batch, l->outputs,
l->drop_blocks_scale_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
// dropout
else
{
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK, 0,
get_cuda_stream()>>>(
state.delta, size, l->rand_gpu, l->probability, l->scale);
CHECK_CUDA(cudaPeekAtLastError());
}
}
|
ea8bef202f23fc76aa4c436c039a3e5be3f589c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <mpi.h>
#include <omp.h>
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace std;
using namespace cv;
void cudaFatal(hipError_t error)
{
if (error != hipSuccess) {
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
// returns the position in the flattened vector corresponding to a given channel and (i, j) coordinate
__device__ int posCalc(int channel,int i, int j,int qntCols,int imgSize){
return (imgSize*channel+i*qntCols+j);
}
// returns the channel corresponding to a position in the vector
__host__ int idxCalcC(int pos,int qntCols,int imgSize){
return (floorf(pos*1.0/imgSize));
}
// returns the J coordinate corresponding to a position in the vector
__host__ int idxCalcJ(int pos,int qntCols,int imgSize){
return (pos%qntCols);
}
// returns the I coordinate corresponding to a position in the vector
__host__ int idxCalcI(int pos,int qntCols,int imgSize,int channelNum){
int qntLinhasPorCanal = (imgSize/qntCols);
// return ((int)floor(pos*1.0/qntLinhasPorCanal))%qntLinhasPorCanal;
return ((int)floor(pos*1.0/qntCols))%qntLinhasPorCanal;
}
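// Mean-filter (smoothing) kernel. Work is split in three levels: CUDA blocks
// are first partitioned among the colour channels (d_blocoOffset/d_blocoQnt),
// each block then takes a contiguous band of output rows of its channel, and
// the block's threads split that band again. Each output pixel is the average
// of the (2*border+1) x (2*border+1) window around it in the padded input.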
__global__ void smooth(unsigned char *d_inImage,unsigned char *d_outImage,int *d_blocoQnt,int *d_blocoOffset,
int imgSizeIn,int imgSizeOut,int imgLinOut,int imgColIn,int imgColOut,int channelNum,int border){
int offsetBloco;
int bId= blockIdx.x;
    int qntLinBloco; // may not be needed
int offsetThread;
int qntLinThread=0;
int tId = threadIdx.x;
int c=-1;
// int offsetCanal;
//0
//171
//342
//512
// for(int i=0;i<channelNum;i++){
// printf("%d ",d_blocoOffset[i]);
// }
for(int i=0;i<channelNum;i++){
if(bId>=d_blocoOffset[i] && bId<d_blocoOffset[i+1]){
c=i;
break;
}
}
if(c<0 || c>=channelNum){
return;
}
// if(c==3){
// c=2;
// }
bId=bId-d_blocoOffset[c];
    // total number of rows / number of blocks in that channel
if(bId<(imgLinOut%d_blocoQnt[c])){
offsetBloco= bId*floorf(1.0*imgLinOut/d_blocoQnt[c]) + bId;
}else{
offsetBloco= bId*floorf(1.0*imgLinOut/d_blocoQnt[c]) + imgLinOut%d_blocoQnt[c];
}
if(bId<imgLinOut%d_blocoQnt[c]){
qntLinBloco=ceilf(1.0*imgLinOut/d_blocoQnt[c]);
}else{
qntLinBloco=floorf(1.0*imgLinOut/d_blocoQnt[c]);
}
    // total number of rows in a block / number of threads in that block
if(tId<qntLinBloco%blockDim.x){
offsetThread=tId*floorf(1.0*qntLinBloco/blockDim.x)+tId;
}else{
offsetThread=tId*floorf(1.0*qntLinBloco/blockDim.x)+qntLinBloco%blockDim.x;
}
if(tId<qntLinBloco%blockDim.x){
qntLinThread=ceilf(1.0*qntLinBloco/blockDim.x);
}else{
qntLinThread=floorf(1.0*qntLinBloco/blockDim.x);
}
    // get the offset of the channel's first block
    // total number of rows / number of blocks in that channel
int offsetPrimeiroBloco;
if(d_blocoOffset[c]<(imgLinOut%d_blocoQnt[c])){
offsetPrimeiroBloco= d_blocoOffset[c]*floorf(1.0*imgLinOut/d_blocoQnt[c]) + d_blocoOffset[c];
}else{
offsetPrimeiroBloco= d_blocoOffset[c]*floorf(1.0*imgLinOut/d_blocoQnt[c]) + imgLinOut%d_blocoQnt[c];
}
// int linStart=offsetThread+offsetBloco;
// int linStart=offsetThread+offsetBloco-offsetPrimeiroBloco;
int linStart=offsetThread+offsetBloco;
float sum;
// printf("BlockID: %d ThreadID: %d C: %d offsetBloco: %d qntLinBloco: %d offsetThread %d qntLinThread %d linstart: %d\n",bId,tId,c,offsetBloco,qntLinBloco,offsetThread,qntLinThread,linStart);
for(int i=linStart;i<linStart+qntLinThread;++i){
for(int j=0;j<imgColOut;++j){
sum=0;
for(int l=-border;l<=border;l++){
for(int k=-border;k<=border;k++){
sum+=d_inImage[posCalc(c,i+l+border,j+k+border,imgColIn,imgSizeIn)];
}
}
sum=sum/(((2.0*border)+1)*((2.0*border)+1));
d_outImage[posCalc(c,i,j,imgColOut,imgSizeOut)]=sum;
}
}
}
// void smooth(unsigned char *img,unsigned char *output,int qntLinhas,int qntCols,int size_com_borda,int offset,int order);
void diffTimeSpec(struct timespec start,struct timespec end,struct timespec *temp);
int main(int argc,char** argv){
    int NTHREADS = 3; // initialized to 3 threads, but overridden by the command-line arguments below
    int NBLOCOS = 15; // initialized to 15 blocks, but overridden by the command-line arguments below
    // sets the number of blocks according to the number of processes spawned; that number is tied to the number of hosts listed in the host file
    // one node is left out, the master: in our implementation the master does not take part in processing the image
if(argc>=5){
NTHREADS=atoi(argv[3]);
//cout<<NTHREADS<<endl;
NBLOCOS=atoi(argv[4]);
}
    int border = 2; // tied to the size of the smoothing kernel; use 2 for a 5x5 kernel
    // master node that provides resources to the others
if(NBLOCOS<=0){
cout <<"E necessario pelo menos um processo alem do master"<<endl;
return -1;
}
Mat image;
if(argc!=5){
printf("entrar com nome do arquivo, Tipo de imagem e numero de threads\n");
return -1;
}
int tipoImagem = atoi(argv[2]);
    // open the image
    if(tipoImagem == 1){ // RGB case
image=imread(argv[1],CV_LOAD_IMAGE_COLOR);
    }else if(tipoImagem==2){ // grayscale case
image=imread(argv[1],CV_LOAD_IMAGE_GRAYSCALE);
}
if(!image.data){
printf("No data read.\n");
return -1;
}
    // store the original image dimensions (without the border)
int rowMat = image.rows;
int colsMat = image.cols;
int deptMat = image.depth();
    // create the output image
Mat outputMat(rowMat,colsMat,deptMat);
struct timespec begin, end,result;
double time_spent;
clock_gettime(CLOCK_REALTIME,&begin);
int channelNum= image.channels();
Mat channels[3];
    // add a border to the image
copyMakeBorder(image,image,border,border,border,border,BORDER_CONSTANT,Scalar(0,0,0));
Mat outB(image.rows-border*2,image.cols-border*2,image.depth());
channels[0]=outB;
    if(channelNum==3){ // if the image is RGB, create the additional channels
Mat outG(image.rows-border*2,image.cols-border*2,image.depth());
Mat outR(image.rows-border*2,image.cols-border*2,image.depth());
channels[1]=outG;
channels[2]=outR;
}
    // work division
unsigned char *d_outImage,*d_inImage;
int *d_blocoOffset,*d_blocoQnt;
unsigned char *h_outImage,*h_inImage;
int *h_blocoOffset,*h_blocoQnt;
int sizeIn = image.rows*image.cols*channelNum*sizeof(unsigned char);
int sizeOut = rowMat*colsMat*channelNum*sizeof(unsigned char);
h_inImage = (unsigned char*) malloc(sizeIn);
h_outImage = (unsigned char*) malloc(sizeOut);
h_blocoQnt = (int*) malloc((channelNum+1)*sizeof(int));
h_blocoOffset = (int*) malloc((channelNum+1)*sizeof(int));
cudaFatal(hipMalloc(&d_inImage,sizeIn));
cudaFatal(hipMalloc(&d_outImage,sizeOut));
cudaFatal(hipMalloc(&d_blocoQnt,(channelNum+1)*sizeof(int)));
cudaFatal(hipMalloc(&d_blocoOffset,(channelNum+1)*sizeof(int)));
int k=0;
for (int c=0;c<channelNum;c++){
for(int i=0;i<image.rows;i++){
for (int j = 0; j < image.cols; ++j){
if(channelNum==3){
h_inImage[k++] = image.at<Vec3b>(i,j).val[c];
}else{
h_inImage[k++] = image.at<uchar>(i,j);
}
}
}
}
    // e.g. NBLOCOS = 512 with 3 channels: 512 / 3 = 170 and 512 % 3 = 2,
    // so the per-channel block offsets are 0, 171, 342 and the sentinel 512
for(int i=0;i<channelNum;++i){
if(i< NBLOCOS%channelNum){
h_blocoOffset[i]=i*floor(1.0*NBLOCOS/channelNum) + i;
h_blocoQnt[i]=ceil(1.0*NBLOCOS/channelNum);
}else{
h_blocoOffset[i]=i*floor(1.0*NBLOCOS/channelNum) + NBLOCOS%channelNum;
h_blocoQnt[i]= floor(1.0*NBLOCOS/channelNum);
}
// printf("%d ",h_blocoOffset[i]);
}
// printf("NBLOCOS: %d NTHREADS: %d\n",NBLOCOS,NTHREADS);
int imgLinOut = outputMat.rows;
h_blocoOffset[channelNum] = NBLOCOS;
// for(int i=0;i<channelNum+1;i++){
// printf("%d ",h_blocoOffset[i]);
// }
// printf("\n");
cudaFatal(hipMemcpy(d_inImage,h_inImage,sizeIn,hipMemcpyHostToDevice));
cudaFatal(hipMemcpy(d_blocoOffset,h_blocoOffset,(channelNum+1)*sizeof(int),hipMemcpyHostToDevice));
cudaFatal(hipMemcpy(d_blocoQnt,h_blocoQnt,(channelNum+1)*sizeof(int),hipMemcpyHostToDevice));
int imgSizeIn = image.rows*image.cols;
int imgSizeOut = rowMat*colsMat;
int imgColIn = image.cols;
int imgColOut = colsMat;
dim3 griSize(NBLOCOS,1,1);
dim3 blockSize(NTHREADS,1,1);
hipLaunchKernelGGL(( smooth), dim3(griSize),dim3(blockSize), 0, 0, d_inImage,d_outImage,d_blocoQnt,d_blocoOffset,imgSizeIn,imgSizeOut,imgLinOut,imgColIn,imgColOut,channelNum,border);
cudaFatal(hipDeviceSynchronize());
// smooth(unsigned char *d_inImage,unsigned char *d_outImage,unsigned char *d_blocoQnt,unsigned char *d_blocoOffset,
// int imgSizeIn,int imgSizeOut,int imgLinOut,int imgColIn,int imgColOut,int channelNum,int border)
cudaFatal(hipMemcpy(h_outImage,d_outImage,sizeOut,hipMemcpyDeviceToHost));
int auxC,auxI,auxJ;
k=0;
for (int c=0;c<channelNum;c++){
for(int i=0;i<rowMat;i++){
for(int j=0;j<colsMat;j++){
if(channelNum==3){
// outputMat.at<Vec3b>(l,k).val[vetorIdCanal[i]]=partitionedBlockRcv[y++];
channels[c].at<uchar>(i,j)=(uchar)h_outImage[k++];
// cout<<vetorIdCanal[i]<<endl;
}else{
outputMat.at<uchar>(i,j)=(uchar)h_outImage[k++];
}
}
}
}
// for(int k=0;k<(sizeOut/sizeof(unsigned char));k++){
// auxC=idxCalcC(k,imgColOut,imgSizeOut);
// auxI=idxCalcI(k,imgColOut,imgSizeOut,channelNum);
// auxJ=idxCalcJ(k,imgColOut,imgSizeOut);
// // printf("C: %d I: %d J: %d\n",auxC,auxI,auxJ);
// if(channelNum==3){
// // outputMat.at<Vec3b>(l,k).val[vetorIdCanal[i]]=partitionedBlockRcv[y++];
// channels[auxC].at<uchar>(auxI,auxJ)=(uchar)h_outImage[k];
// // cout<<vetorIdCanal[i]<<endl;
// }else{
// outputMat.at<uchar>(auxI,auxJ)=(uchar)h_outImage[k];
// }
// }
if (channelNum==3){
merge(channels,channelNum,outputMat);
}
clock_gettime(CLOCK_REALTIME,&end);
    // compute the execution time
diffTimeSpec(begin, end, &result);
//time_spent=((double) difftime(end.tv_sec,begin.tv_sec))+(result.tv_nsec*1.0/1000000000.0);
time_spent=((double) result.tv_sec)+(result.tv_nsec*1.0/1000000000.0);
// namedWindow("Orginal",WINDOW_NORMAL);
// namedWindow("Resultado",WINDOW_NORMAL);
// imshow("Original",image);
// imshow("Resultado",outputMat);
// waitKey(0);
// cout << "Nome imagem: "<< argv[1] <<endl;
std::string inFileName(argv[1]);
cout<<inFileName<<"\t"<<NBLOCOS<<"\t"<<NTHREADS<<"\t"<<time_spent<<endl;
std::string outFileName = inFileName.substr(0,inFileName.find_last_of("."));
outFileName += ".ppm";
imwrite(outFileName,outputMat);
//imwrite("../canal0.ppm",channels[0]);
//imwrite("../canal1.ppm",channels[1]);
//imwrite("../canal2.ppm",channels[2]);
//imwrite("OIEEEEEEEE.ppm",image);
free(h_outImage);
free(h_inImage);
cudaFatal(hipFree(d_outImage));
cudaFatal(hipFree(d_inImage));
}
void diffTimeSpec(struct timespec start,struct timespec end,struct timespec *temp)
{
if ((end.tv_nsec-start.tv_nsec)<0) {
temp->tv_sec = end.tv_sec-start.tv_sec-1;
temp->tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp->tv_sec = end.tv_sec-start.tv_sec;
temp->tv_nsec = end.tv_nsec-start.tv_nsec;
}
return;
}
// computes the average over the smoothing window
// inline
// unsigned char calculaMedia(unsigned char *in,unsigned char *out,int qntLinhas,int qntCols,int p,int border){
// int sum=0;
// int i= idxCalcI(p,qntCols);
// int j= idxCalcJ(p,qntCols);
// for(int k=-border;k<=border;k++){
// for(int l=-border;l<=border;l++){
// sum+=in[posCalc(i+k,j+l,qntCols)] ;
// }
// }
// return (unsigned char)(sum/pow(border*2+1,2));
// }
// void smooth(unsigned char *img,unsigned char *output,int qntLinhas,int qntCols,int size_com_borda,int offset,int border){
// //TODO: add borders as needed
// int sum;
// // Mat imgWithBorder(img,Rect(border,border,img.rows,img.cols));
// int lastPos = posCalc(qntLinhas+2*border-1,qntCols-1,qntCols)+1;
// int k=0;
// int auxi,auxj;
// unsigned char auxDebug;
// //cout<< "Retorno do smooth: ";
// #pragma omp parallel //shared(output)
// {
// #pragma omp for schedule(dynamic) private(auxi,auxj,auxDebug)
// //process the vector. Since the matrix was flattened into a vector a single loop is enough; helper functions convert the indices
// for(int i=posCalc(border,border,qntCols);i<lastPos;i++){
// auxi=idxCalcI(i,qntCols);
// auxj=idxCalcJ(i,qntCols);
// //cout<< auxi << " "<<auxj<<endl;
// if(auxi>=border && auxi<qntLinhas+2*border-border && auxj>=border && auxj<qntCols-border){ // if this is a valid image pixel
// auxDebug=calculaMedia(img,output,qntLinhas,qntCols,i,border);
// output[posCalc(auxi-border,auxj-border,qntCols-2*border)]=auxDebug;
// }
// }
// }
// }
| ea8bef202f23fc76aa4c436c039a3e5be3f589c8.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <mpi.h>
#include <omp.h>
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace std;
using namespace cv;
void cudaFatal(cudaError_t error)
{
if (error != cudaSuccess) {
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
// returns the position in the flattened vector corresponding to a given channel and (i, j) coordinate
__device__ int posCalc(int channel,int i, int j,int qntCols,int imgSize){
return (imgSize*channel+i*qntCols+j);
}
// returns the channel corresponding to a position in the vector
__host__ int idxCalcC(int pos,int qntCols,int imgSize){
return (floorf(pos*1.0/imgSize));
}
// returns the J coordinate corresponding to a position in the vector
__host__ int idxCalcJ(int pos,int qntCols,int imgSize){
return (pos%qntCols);
}
// returns the I coordinate corresponding to a position in the vector
__host__ int idxCalcI(int pos,int qntCols,int imgSize,int channelNum){
int qntLinhasPorCanal = (imgSize/qntCols);
// return ((int)floor(pos*1.0/qntLinhasPorCanal))%qntLinhasPorCanal;
return ((int)floor(pos*1.0/qntCols))%qntLinhasPorCanal;
}
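// Mean-filter (smoothing) kernel. Work is split in three levels: CUDA blocks
// are first partitioned among the colour channels (d_blocoOffset/d_blocoQnt),
// each block then takes a contiguous band of output rows of its channel, and
// the block's threads split that band again. Each output pixel is the average
// of the (2*border+1) x (2*border+1) window around it in the padded input.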
__global__ void smooth(unsigned char *d_inImage,unsigned char *d_outImage,int *d_blocoQnt,int *d_blocoOffset,
int imgSizeIn,int imgSizeOut,int imgLinOut,int imgColIn,int imgColOut,int channelNum,int border){
int offsetBloco;
int bId= blockIdx.x;
    int qntLinBloco; // may not be needed
int offsetThread;
int qntLinThread=0;
int tId = threadIdx.x;
int c=-1;
// int offsetCanal;
//0
//171
//342
//512
// for(int i=0;i<channelNum;i++){
// printf("%d ",d_blocoOffset[i]);
// }
for(int i=0;i<channelNum;i++){
if(bId>=d_blocoOffset[i] && bId<d_blocoOffset[i+1]){
c=i;
break;
}
}
if(c<0 || c>=channelNum){
return;
}
// if(c==3){
// c=2;
// }
bId=bId-d_blocoOffset[c];
    // total number of rows / number of blocks in that channel
if(bId<(imgLinOut%d_blocoQnt[c])){
offsetBloco= bId*floorf(1.0*imgLinOut/d_blocoQnt[c]) + bId;
}else{
offsetBloco= bId*floorf(1.0*imgLinOut/d_blocoQnt[c]) + imgLinOut%d_blocoQnt[c];
}
if(bId<imgLinOut%d_blocoQnt[c]){
qntLinBloco=ceilf(1.0*imgLinOut/d_blocoQnt[c]);
}else{
qntLinBloco=floorf(1.0*imgLinOut/d_blocoQnt[c]);
}
    // total number of rows in a block / number of threads in that block
if(tId<qntLinBloco%blockDim.x){
offsetThread=tId*floorf(1.0*qntLinBloco/blockDim.x)+tId;
}else{
offsetThread=tId*floorf(1.0*qntLinBloco/blockDim.x)+qntLinBloco%blockDim.x;
}
if(tId<qntLinBloco%blockDim.x){
qntLinThread=ceilf(1.0*qntLinBloco/blockDim.x);
}else{
qntLinThread=floorf(1.0*qntLinBloco/blockDim.x);
}
    // get the offset of the channel's first block
    // total number of rows / number of blocks in that channel
int offsetPrimeiroBloco;
if(d_blocoOffset[c]<(imgLinOut%d_blocoQnt[c])){
offsetPrimeiroBloco= d_blocoOffset[c]*floorf(1.0*imgLinOut/d_blocoQnt[c]) + d_blocoOffset[c];
}else{
offsetPrimeiroBloco= d_blocoOffset[c]*floorf(1.0*imgLinOut/d_blocoQnt[c]) + imgLinOut%d_blocoQnt[c];
}
// int linStart=offsetThread+offsetBloco;
// int linStart=offsetThread+offsetBloco-offsetPrimeiroBloco;
int linStart=offsetThread+offsetBloco;
float sum;
// printf("BlockID: %d ThreadID: %d C: %d offsetBloco: %d qntLinBloco: %d offsetThread %d qntLinThread %d linstart: %d\n",bId,tId,c,offsetBloco,qntLinBloco,offsetThread,qntLinThread,linStart);
for(int i=linStart;i<linStart+qntLinThread;++i){
for(int j=0;j<imgColOut;++j){
sum=0;
for(int l=-border;l<=border;l++){
for(int k=-border;k<=border;k++){
sum+=d_inImage[posCalc(c,i+l+border,j+k+border,imgColIn,imgSizeIn)];
}
}
sum=sum/(((2.0*border)+1)*((2.0*border)+1));
d_outImage[posCalc(c,i,j,imgColOut,imgSizeOut)]=sum;
}
}
}
// void smooth(unsigned char *img,unsigned char *output,int qntLinhas,int qntCols,int size_com_borda,int offset,int order);
void diffTimeSpec(struct timespec start,struct timespec end,struct timespec *temp);
int main(int argc,char** argv){
    int NTHREADS = 3; // initialized to 3 threads, but overridden by the command-line arguments below
    int NBLOCOS = 15; // initialized to 15 blocks, but overridden by the command-line arguments below
    // sets the number of blocks according to the number of processes spawned; that number is tied to the number of hosts listed in the host file
    // one node is left out, the master: in our implementation the master does not take part in processing the image
if(argc>=5){
NTHREADS=atoi(argv[3]);
//cout<<NTHREADS<<endl;
NBLOCOS=atoi(argv[4]);
}
    int border = 2; // tied to the size of the smoothing kernel; use 2 for a 5x5 kernel
    // master node that provides resources to the others
if(NBLOCOS<=0){
cout <<"E necessario pelo menos um processo alem do master"<<endl;
return -1;
}
Mat image;
if(argc!=5){
printf("entrar com nome do arquivo, Tipo de imagem e numero de threads\n");
return -1;
}
int tipoImagem = atoi(argv[2]);
    // open the image
    if(tipoImagem == 1){ // RGB case
image=imread(argv[1],CV_LOAD_IMAGE_COLOR);
    }else if(tipoImagem==2){ // grayscale case
image=imread(argv[1],CV_LOAD_IMAGE_GRAYSCALE);
}
if(!image.data){
printf("No data read.\n");
return -1;
}
    // store the original image dimensions (without the border)
int rowMat = image.rows;
int colsMat = image.cols;
int deptMat = image.depth();
    // create the output image
Mat outputMat(rowMat,colsMat,deptMat);
struct timespec begin, end,result;
double time_spent;
clock_gettime(CLOCK_REALTIME,&begin);
int channelNum= image.channels();
Mat channels[3];
    // add a border to the image
copyMakeBorder(image,image,border,border,border,border,BORDER_CONSTANT,Scalar(0,0,0));
Mat outB(image.rows-border*2,image.cols-border*2,image.depth());
channels[0]=outB;
    if(channelNum==3){ // if the image is RGB, create the additional channels
Mat outG(image.rows-border*2,image.cols-border*2,image.depth());
Mat outR(image.rows-border*2,image.cols-border*2,image.depth());
channels[1]=outG;
channels[2]=outR;
}
    // work division
unsigned char *d_outImage,*d_inImage;
int *d_blocoOffset,*d_blocoQnt;
unsigned char *h_outImage,*h_inImage;
int *h_blocoOffset,*h_blocoQnt;
int sizeIn = image.rows*image.cols*channelNum*sizeof(unsigned char);
int sizeOut = rowMat*colsMat*channelNum*sizeof(unsigned char);
h_inImage = (unsigned char*) malloc(sizeIn);
h_outImage = (unsigned char*) malloc(sizeOut);
h_blocoQnt = (int*) malloc((channelNum+1)*sizeof(int));
h_blocoOffset = (int*) malloc((channelNum+1)*sizeof(int));
cudaFatal(cudaMalloc(&d_inImage,sizeIn));
cudaFatal(cudaMalloc(&d_outImage,sizeOut));
cudaFatal(cudaMalloc(&d_blocoQnt,(channelNum+1)*sizeof(int)));
cudaFatal(cudaMalloc(&d_blocoOffset,(channelNum+1)*sizeof(int)));
int k=0;
for (int c=0;c<channelNum;c++){
for(int i=0;i<image.rows;i++){
for (int j = 0; j < image.cols; ++j){
if(channelNum==3){
h_inImage[k++] = image.at<Vec3b>(i,j).val[c];
}else{
h_inImage[k++] = image.at<uchar>(i,j);
}
}
}
}
    // e.g. NBLOCOS = 512 with 3 channels: 512 / 3 = 170 and 512 % 3 = 2,
    // so the per-channel block offsets are 0, 171, 342 and the sentinel 512
for(int i=0;i<channelNum;++i){
if(i< NBLOCOS%channelNum){
h_blocoOffset[i]=i*floor(1.0*NBLOCOS/channelNum) + i;
h_blocoQnt[i]=ceil(1.0*NBLOCOS/channelNum);
}else{
h_blocoOffset[i]=i*floor(1.0*NBLOCOS/channelNum) + NBLOCOS%channelNum;
h_blocoQnt[i]= floor(1.0*NBLOCOS/channelNum);
}
// printf("%d ",h_blocoOffset[i]);
}
// printf("NBLOCOS: %d NTHREADS: %d\n",NBLOCOS,NTHREADS);
int imgLinOut = outputMat.rows;
h_blocoOffset[channelNum] = NBLOCOS;
// for(int i=0;i<channelNum+1;i++){
// printf("%d ",h_blocoOffset[i]);
// }
// printf("\n");
cudaFatal(cudaMemcpy(d_inImage,h_inImage,sizeIn,cudaMemcpyHostToDevice));
cudaFatal(cudaMemcpy(d_blocoOffset,h_blocoOffset,(channelNum+1)*sizeof(int),cudaMemcpyHostToDevice));
cudaFatal(cudaMemcpy(d_blocoQnt,h_blocoQnt,(channelNum+1)*sizeof(int),cudaMemcpyHostToDevice));
int imgSizeIn = image.rows*image.cols;
int imgSizeOut = rowMat*colsMat;
int imgColIn = image.cols;
int imgColOut = colsMat;
dim3 griSize(NBLOCOS,1,1);
dim3 blockSize(NTHREADS,1,1);
smooth<<<griSize,blockSize>>>(d_inImage,d_outImage,d_blocoQnt,d_blocoOffset,imgSizeIn,imgSizeOut,imgLinOut,imgColIn,imgColOut,channelNum,border);
cudaFatal(cudaThreadSynchronize());
// smooth(unsigned char *d_inImage,unsigned char *d_outImage,unsigned char *d_blocoQnt,unsigned char *d_blocoOffset,
// int imgSizeIn,int imgSizeOut,int imgLinOut,int imgColIn,int imgColOut,int channelNum,int border)
cudaFatal(cudaMemcpy(h_outImage,d_outImage,sizeOut,cudaMemcpyDeviceToHost));
int auxC,auxI,auxJ;
k=0;
for (int c=0;c<channelNum;c++){
for(int i=0;i<rowMat;i++){
for(int j=0;j<colsMat;j++){
if(channelNum==3){
// outputMat.at<Vec3b>(l,k).val[vetorIdCanal[i]]=partitionedBlockRcv[y++];
channels[c].at<uchar>(i,j)=(uchar)h_outImage[k++];
// cout<<vetorIdCanal[i]<<endl;
}else{
outputMat.at<uchar>(i,j)=(uchar)h_outImage[k++];
}
}
}
}
// for(int k=0;k<(sizeOut/sizeof(unsigned char));k++){
// auxC=idxCalcC(k,imgColOut,imgSizeOut);
// auxI=idxCalcI(k,imgColOut,imgSizeOut,channelNum);
// auxJ=idxCalcJ(k,imgColOut,imgSizeOut);
// // printf("C: %d I: %d J: %d\n",auxC,auxI,auxJ);
// if(channelNum==3){
// // outputMat.at<Vec3b>(l,k).val[vetorIdCanal[i]]=partitionedBlockRcv[y++];
// channels[auxC].at<uchar>(auxI,auxJ)=(uchar)h_outImage[k];
// // cout<<vetorIdCanal[i]<<endl;
// }else{
// outputMat.at<uchar>(auxI,auxJ)=(uchar)h_outImage[k];
// }
// }
if (channelNum==3){
merge(channels,channelNum,outputMat);
}
clock_gettime(CLOCK_REALTIME,&end);
    // compute the execution time
diffTimeSpec(begin, end, &result);
//time_spent=((double) difftime(end.tv_sec,begin.tv_sec))+(result.tv_nsec*1.0/1000000000.0);
time_spent=((double) result.tv_sec)+(result.tv_nsec*1.0/1000000000.0);
// namedWindow("Orginal",WINDOW_NORMAL);
// namedWindow("Resultado",WINDOW_NORMAL);
// imshow("Original",image);
// imshow("Resultado",outputMat);
// waitKey(0);
// cout << "Nome imagem: "<< argv[1] <<endl;
std::string inFileName(argv[1]);
cout<<inFileName<<"\t"<<NBLOCOS<<"\t"<<NTHREADS<<"\t"<<time_spent<<endl;
std::string outFileName = inFileName.substr(0,inFileName.find_last_of("."));
outFileName += ".ppm";
imwrite(outFileName,outputMat);
//imwrite("../canal0.ppm",channels[0]);
//imwrite("../canal1.ppm",channels[1]);
//imwrite("../canal2.ppm",channels[2]);
//imwrite("OIEEEEEEEE.ppm",image);
free(h_outImage);
free(h_inImage);
cudaFatal(cudaFree(d_outImage));
cudaFatal(cudaFree(d_inImage));
}
void diffTimeSpec(struct timespec start,struct timespec end,struct timespec *temp)
{
if ((end.tv_nsec-start.tv_nsec)<0) {
temp->tv_sec = end.tv_sec-start.tv_sec-1;
temp->tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp->tv_sec = end.tv_sec-start.tv_sec;
temp->tv_nsec = end.tv_nsec-start.tv_nsec;
}
return;
}
// computes the average over the smoothing window
// inline
// unsigned char calculaMedia(unsigned char *in,unsigned char *out,int qntLinhas,int qntCols,int p,int border){
// int sum=0;
// int i= idxCalcI(p,qntCols);
// int j= idxCalcJ(p,qntCols);
// for(int k=-border;k<=border;k++){
// for(int l=-border;l<=border;l++){
// sum+=in[posCalc(i+k,j+l,qntCols)] ;
// }
// }
// return (unsigned char)(sum/pow(border*2+1,2));
// }
// void smooth(unsigned char *img,unsigned char *output,int qntLinhas,int qntCols,int size_com_borda,int offset,int border){
// //TODO: add borders as needed
// int sum;
// // Mat imgWithBorder(img,Rect(border,border,img.rows,img.cols));
// int lastPos = posCalc(qntLinhas+2*border-1,qntCols-1,qntCols)+1;
// int k=0;
// int auxi,auxj;
// unsigned char auxDebug;
// //cout<< "Retorno do smooth: ";
// #pragma omp parallel //shared(output)
// {
// #pragma omp for schedule(dynamic) private(auxi,auxj,auxDebug)
// //process the vector. Since the matrix was flattened into a vector a single loop is enough; helper functions convert the indices
// for(int i=posCalc(border,border,qntCols);i<lastPos;i++){
// auxi=idxCalcI(i,qntCols);
// auxj=idxCalcJ(i,qntCols);
// //cout<< auxi << " "<<auxj<<endl;
// if(auxi>=border && auxi<qntLinhas+2*border-border && auxj>=border && auxj<qntCols-border){ // if this is a valid image pixel
// auxDebug=calculaMedia(img,output,qntLinhas,qntCols,i,border);
// output[posCalc(auxi-border,auxj-border,qntCols-2*border)]=auxDebug;
// }
// }
// }
// }
|
e4f8d05b4586176dd6faff7cc2b0169dc242e130.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 19-Oct-2012 16:21:07
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
float *ind_arg0,
float *ind_arg1,
float *ind_arg2,
float *ind_arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg6_l[4];
float arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4];
ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4];
ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4];
ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_float;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_float;
// user-supplied kernel call
res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg1_s+arg_map[2*set_size+n+offset_b]*4,
ind_arg1_s+arg_map[3*set_size+n+offset_b]*4,
ind_arg2_s+arg_map[4*set_size+n+offset_b]*1,
ind_arg2_s+arg_map[5*set_size+n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map;
int arg7_map;
if (col2>=0) {
arg6_map = arg_map[6*set_size+n+offset_b];
arg7_map = arg_map[7*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
  // apply the indirect (pointer-mapped) writes/increments
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
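    // The plan's colouring guarantees that blocks of the same colour never
    // increment the same indirectly addressed data, so the colours are launched
    // sequentially; the MPI halo exchange is only waited on before the first
    // non-core colour.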
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg4.data_d,
(float *)arg6.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(2);
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
| e4f8d05b4586176dd6faff7cc2b0169dc242e130.cu | //
// auto-generated by op2.m on 19-Oct-2012 16:21:07
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
float *ind_arg0,
float *ind_arg1,
float *ind_arg2,
float *ind_arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg6_l[4];
float arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4];
ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4];
ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4];
ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_float;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_float;
// user-supplied kernel call
res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg1_s+arg_map[2*set_size+n+offset_b]*4,
ind_arg1_s+arg_map[3*set_size+n+offset_b]*4,
ind_arg2_s+arg_map[4*set_size+n+offset_b]*1,
ind_arg2_s+arg_map[5*set_size+n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map;
int arg7_map;
if (col2>=0) {
arg6_map = arg_map[6*set_size+n+offset_b];
arg7_map = arg_map[7*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
  // apply the indirect (pointer-mapped) writes/increments
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg4.data_d,
(float *)arg6.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(2);
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
|
aca364ba207fa14d72e9fb4abd1210f8c1948afc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include "maskRCNNKernels.h"
#include "plugin.h"
#include <NvInfer.h>
#include <assert.h>
#include <hipcub/hipcub.hpp>
#include <iostream>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#define DUBUG_KERNEL 0
#define DUBUG_BATCH 0
#define DEBUG_T 1
#define dMIN(a, b) ((a) < (b) ? (a) : (b))
#define dMAX(a, b) ((a) > (b) ? (a) : (b))
#define dCLAMP(x, xMin, xMax) ((x) > (xMin) ? ((x) < (xMax) ? (x) : (xMax)) : (xMin))
template <typename BoxType>
struct BBoxT
{
BoxType y1, x1, y2, x2;
};
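// Half-precision helpers: native __half arithmetic needs sm_53 or newer, so
// older architectures fall back to doing the math in float.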
inline __device__ __half mul_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a * b;
#else
return __float2half(__half2float(a) * __half2float(b));
#endif
}
inline __device__ __half add_fb(const __half & a, const half & b) {
#if __CUDA_ARCH__ >= 530
return a + b;
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
template <typename DType>
__global__ void argMaxReset_kernel(
int samples, int NClass, const DType* in_scores, const int* maxIdx, DType* out_scores)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int max_idx = samples * NClass;
if (idx >= max_idx)
return;
int sampleIdx = idx / NClass;
int classIdx = idx % NClass;
if (classIdx != maxIdx[sampleIdx])
out_scores[idx] = 0;
else
out_scores[idx] = in_scores[idx];
}
template <typename DType>
struct ScanItem
{
DType data;
int idx;
};
template <typename DType>
struct GreaterItem
{
__host__ __device__ __forceinline__ ScanItem<DType> operator()(
const ScanItem<DType>& a, const ScanItem<DType>& b) const
{
return (a.data > b.data ? a : b);
}
};
template <typename DType>
__global__ void resetMemValue_kernel(void* outPtr, int samples, float val)
{
DType* out = static_cast<DType*>(outPtr);
int loop = gridDim.x * blockDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop)
{
out[idx] = (DType) val;
}
}
template <>
__global__ void resetMemValue_kernel<half>(void* outPtr, int samples, float val)
{
__half* out = static_cast<__half*>(outPtr);
int loop = gridDim.x * blockDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop)
{
out[idx] = __float2half(val);
}
}
// blockDim.x : NClass
// GroupDim.x : sample count
// GroupDim.y : batch N
// outScore : DType[ N * sample * 1 ]
// outLabel : int[ N * sample * 1 ]
// outBbox : int[ N * sample * 4 ]
template <typename DType, typename BoxType, int Threads = 32>
__global__ void argMaxGroup_kernel(int samples, int start_class_id, int NClass, const void* inScorePtr,
const void* inBboxPtr, const void* validSampleCountPtr, void* outScorePtr, void* outLabelPtr, void* outBboxPtr)
{
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inBbox = static_cast<const BoxType*>(inBboxPtr);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
BoxType* outLabel = static_cast<BoxType*>(outLabelPtr);
BoxType* outBbox = static_cast<BoxType*>(outBboxPtr);
const int N = blockIdx.y;
const int validSamples = validSampleCount[N];
typedef ScanItem<DType> ScanItemD;
typedef hipcub::BlockReduce<ScanItemD, Threads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int iSample = blockIdx.x; iSample < validSamples; iSample += gridDim.x)
{
int classOffset = (N * samples + iSample) * NClass; // start from [batch, count, class0]
// total IPerThread * blockDim
ScanItemD maxItem = {0.0f, -1};
for (int i = start_class_id; i < NClass; i += Threads)
{
int curIdx = i + threadIdx.x;
ScanItemD item = {0.0f, -1};
if (curIdx < NClass)
{
item.data = inScore[classOffset + curIdx];
item.idx = curIdx;
}
const int validNum = (NClass - i > Threads ? Threads : NClass - i);
ScanItemD aggregate = BlockReduce(temp_storage).Reduce(item, GreaterItem<DType>(), validNum);
__syncthreads();
if (aggregate.data > maxItem.data)
{
maxItem = aggregate;
}
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0 && iSample < 15 /*&& maxItem.idx >= 32*/)
{
printf("argMaxGroup N:%d, iSample:%d, maxItem(score:%.3f, idx:%d)validReduceNum:%d\n", N, iSample,
(float) maxItem.data, maxItem.idx, validNum);
}
#endif
}
const int dstOffset = N * samples + iSample;
if (threadIdx.x == 0)
{
outScore[dstOffset] = maxItem.data;
outLabel[dstOffset] = (BoxType) maxItem.idx;
outBbox[dstOffset * 4] = inBbox[(classOffset + maxItem.idx) * 4];
outBbox[dstOffset * 4 + 1] = inBbox[(classOffset + maxItem.idx) * 4 + 1];
outBbox[dstOffset * 4 + 2] = inBbox[(classOffset + maxItem.idx) * 4 + 2];
outBbox[dstOffset * 4 + 3] = inBbox[(classOffset + maxItem.idx) * 4 + 3];
}
}
}
struct BlockClassSumPrefix
{
int total;
// Constructor
__device__ BlockClassSumPrefix()
: total(0)
{
}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ int operator()(int aggregate)
{
int old = total;
total += aggregate;
return old;
}
};
#define LabelShift (2.5f)
#define MinValidScore (0.01f)
#define ScoreShift (1.0f)
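// Encodes (label, score) into one sort key so a single descending radix sort
// groups detections by class and orders them by score within a class: each
// class gets a band of width LabelShift, and ScoreShift keeps valid entries
// strictly above the 0 reserved for invalid ones.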
template <typename DType>
__device__ __forceinline__ DType getKey(DType score, int lable, int NClass)
{
return (lable < 0 ? (DType) 0 : ((DType)(NClass - lable - 1) * LabelShift + score + ScoreShift));
}
template <typename DType, typename BoxType>
__device__ __forceinline__ void getScoreLable(DType key, int NClass, DType& score, BoxType& lable)
{
int i = key / LabelShift;
score = (key <= ScoreShift ? (DType) 0 : key - (DType) i * LabelShift - ScoreShift);
score = dCLAMP(score, (DType) 0, (DType) 1.0);
lable = (BoxType)(key <= ScoreShift ? -1 : (NClass - i - 1));
}
// blockDim.x : threads
// gridDim.x : batch N
// validSampleCount INPUT : int [N]
// classStartPos OUTPUT: int [N * (Class + 1)], need memset to zero before this kernel
// outScore OUTPUT : DType [N * samples]
// outLabel OUTPUT : int [N * samples]
// outSampleIdx OUTPUT : int [N * samples]
// outValidSampleCount : int [N]
// IPerThread * Threads >= sample-count
#define MaxClassNum 255
template <typename DType, typename BoxType, int Threads = 256, int IPerThread = 4>
__global__ void sortPerClass_kernel(
// int N,
int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr,
void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr)
{
typedef cub::BlockExchange<DType, Threads, IPerThread> BlockExchangeKey;
typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI;
typedef cub::BlockRadixSort<DType, Threads, IPerThread, int> BlockRadixSort;
typedef hipcub::BlockScan<int, Threads> BlockScanClass;
__shared__ union
{
typename BlockExchangeKey::TempStorage storageKey;
typename BlockExchangeI::TempStorage storageI;
typename BlockRadixSort::TempStorage storageSort;
typename BlockScanClass::TempStorage storageScan;
} temp_storage;
__shared__ int smemClassCount[MaxClassNum];
assert(NClass < MaxClassNum);
assert(IPerThread * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr);
int* classStartPos = static_cast<int*>(classStartPosPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
BoxType* outLabel = static_cast<BoxType*>(outLabelPtr);
int* outSampleIdx = static_cast<int*>(outSampleIdxPtr);
int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr);
for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x)
{
smemClassCount[s] = 0;
}
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
DType key[IPerThread];
int iSample[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
iSample[i] = -1;
key[i] = -1.0f;
int curIdx = i * Threads + threadIdx.x;
if (curIdx < validSamples)
{
int label = (int) (inLabel[blockOffset + curIdx]);
DType score = inScore[blockOffset + curIdx];
if (label != background && label != -1 && score >= scoreThreshold)
{
key[i] = getKey(score, label, NClass);
iSample[i] = curIdx;
}
}
}
BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key);
__syncthreads();
BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample);
__syncthreads();
BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample);
__syncthreads();
// store Idx
cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples);
BoxType lable[IPerThread];
DType score[IPerThread];
#pragma unroll
for (int i = 0; i < IPerThread; ++i)
{
getScoreLable(key[i], NClass, score[i], lable[i]);
}
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples);
cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples);
    // count the surviving samples per class (feeds the classStartPos prefix sum below)
for (int i = 0; i < IPerThread; ++i)
{
if (lable[i] >= (BoxType) 0)
{
atomicAdd(&smemClassCount[(int) lable[i]], 1);
}
}
__syncthreads();
int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClassCount[k] > 0)
printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]);
}
}
__syncthreads();
#endif
BlockClassSumPrefix sumPrefix;
for (int s = 0; s < NClass; s += blockDim.x)
    { // process the classes in tiles of blockDim.x threads
int iClassSamples = 0;
int iClass = s + threadIdx.x;
if (iClass < NClass)
{
iClassSamples = smemClassCount[iClass];
}
BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix);
__syncthreads();
if (iClass < NClass)
{
classStartPos[classBlockOffset + iClass] = iClassSamples;
}
}
if (threadIdx.x == 0)
{
classStartPos[classBlockOffset + NClass] = sumPrefix.total;
assert(sumPrefix.total <= validSamples); // background data removed.
outValidSampleCount[N] = sumPrefix.total;
#if DUBUG_KERNEL
if (N == DUBUG_BATCH)
printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total);
#endif
}
}
template <int Threads = 256, int IPerThread = 4>
__global__ void sortPerClass_kernel_half(
// int N,
int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr,
void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr)
{
typedef cub::BlockExchange<float, Threads, IPerThread> BlockExchangeKey;
typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI;
typedef cub::BlockRadixSort<float, Threads, IPerThread, int> BlockRadixSort;
typedef hipcub::BlockScan<int, Threads> BlockScanClass;
__shared__ union
{
typename BlockExchangeKey::TempStorage storageKey;
typename BlockExchangeI::TempStorage storageI;
typename BlockRadixSort::TempStorage storageSort;
typename BlockScanClass::TempStorage storageScan;
} temp_storage;
__shared__ int smemClassCount[MaxClassNum];
assert(NClass < MaxClassNum);
assert(IPerThread * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const __half* inScore = static_cast<const __half*>(inScorePtr);
const __half* inLabel = static_cast<const __half*>(inLabelPtr);
int* classStartPos = static_cast<int*>(classStartPosPtr);
__half* outScore = static_cast<__half*>(outScorePtr);
__half* outLabel = static_cast<__half*>(outLabelPtr);
int* outSampleIdx = static_cast<int*>(outSampleIdxPtr);
int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr);
for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x)
{
smemClassCount[s] = 0;
}
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
float key[IPerThread];
int iSample[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
iSample[i] = -1;
key[i] = -1.0f;
int curIdx = i * Threads + threadIdx.x;
if (curIdx < validSamples)
{
int label = __half2int_rd(inLabel[blockOffset + curIdx]);
float score = __half2float(inScore[blockOffset + curIdx]);
if (label != background && label != -1 && score >= scoreThreshold)
{
key[i] = getKey<float>(score, label, NClass);
iSample[i] = curIdx;
}
}
}
BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key);
__syncthreads();
BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample);
__syncthreads();
BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample);
__syncthreads();
// store Idx
cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples);
__half lable[IPerThread];
__half score[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
float label_float;
float score_float;
getScoreLable<float>(key[i], NClass, score_float, label_float);
lable[i] = __float2half(label_float);
score[i] = __float2half(score_float);
}
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples);
cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples);
    // count the surviving samples per class (feeds the classStartPos prefix sum below)
for (int i = 0; i < IPerThread; ++i)
{
if (__half2float(lable[i]) >= 0)
{
atomicAdd(&smemClassCount[__half2int_rd(lable[i])], 1);
}
}
__syncthreads();
int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClassCount[k] > 0)
printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]);
}
}
__syncthreads();
#endif
BlockClassSumPrefix sumPrefix;
for (int s = 0; s < NClass; s += blockDim.x)
    { // process the classes in tiles of blockDim.x threads
int iClassSamples = 0;
int iClass = s + threadIdx.x;
if (iClass < NClass)
{
iClassSamples = smemClassCount[iClass];
}
BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix);
__syncthreads();
if (iClass < NClass)
{
classStartPos[classBlockOffset + iClass] = iClassSamples;
}
}
if (threadIdx.x == 0)
{
classStartPos[classBlockOffset + NClass] = sumPrefix.total;
assert(sumPrefix.total <= validSamples); // background data removed.
outValidSampleCount[N] = sumPrefix.total;
#if DUBUG_KERNEL
if (N == DUBUG_BATCH)
printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total);
#endif
}
}
template <typename DType>
__device__ __forceinline__ BBoxT<DType> readBbox(const BBoxT<DType>* inBbox, int idx)
{
BBoxT<DType> ret = ((BBoxT<DType>*) (inBbox))[idx];
return ret;
}
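// Intersection-over-Union of two corner-encoded boxes (y1, x1, y2, x2):
// IoU = area(overlap) / (area(a) + area(b) - area(overlap)); returns 0 when the boxes do not overlap.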
template <typename DType>
__device__ __forceinline__ DType boxIoU(const BBoxT<DType>& a, const BBoxT<DType>& b)
{
BBoxT<DType> overlap = {
dMAX(a.y1, b.y1), dMAX(a.x1, b.x1), dMIN(a.y2, b.y2), dMIN(a.x2, b.x2),
};
DType oW = overlap.x2 - overlap.x1;
DType oH = overlap.y2 - overlap.y1;
if (oW < (DType) 0 || oH < (DType) 0)
return (DType) 0;
DType oA = oW * oH;
return (oA / ((a.y2 - a.y1) * (a.x2 - a.x1) + (b.y2 - b.y1) * (b.x2 - b.x1) - oA));
}
// PerClassNMS
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : = divUp(samples, Threads)
// outFlagSamples OUT: int [N * samples]
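// Algorithm: each surviving sample is repeatedly tested against the current reference box of its
// class (the highest-scoring unsuppressed sample, since the inputs are sorted per class); samples
// whose IoU with the reference exceeds nmsThreshold are unmarked. Each class then advances its
// reference index to the next unsuppressed sample and is counted as done once the reference
// reaches the end of its range; the outer loop stops when all classes are done.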
template <typename DType, typename BoxType, int Threads = 256, int ItemsPerThreads = 4>
__global__ void PerClassNMS_kernel(
// int N,
int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr,
// const void *inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr,
void* outFlagSamplesPtr)
{
typedef BBoxT<BoxType> BBox;
__shared__ struct
{
BBox refBox[MaxClassNum];
int endIdx[MaxClassNum];
int refIdx[MaxClassNum + 1];
bool markSamples[Threads * ItemsPerThreads];
int done;
} smemClasses;
assert(NClass + 1 < MaxClassNum);
assert(samples <= Threads * ItemsPerThreads);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
// const DType *inScore = static_cast<const DType *>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* classStarts = static_cast<const int*>(classStartsPtr);
int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
if (threadIdx.x == 0)
{
smemClasses.done = 0;
}
BBox curBox[ItemsPerThreads];
int label[ItemsPerThreads];
#pragma unroll
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples)
{
label[ite] = (int) inLabel[blockOffset + curIdx];
curBox[ite] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]);
}
else
{
label[ite] = -1;
}
smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true);
}
int classBlockOffset = N * (NClass + 1);
for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x)
{
int refIdx = classStarts[classBlockOffset + i];
smemClasses.refIdx[i] = refIdx;
smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
__syncthreads();
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int endIdx = smemClasses.refIdx[i + 1];
smemClasses.endIdx[i] = endIdx;
if (endIdx == smemClasses.refIdx[i])
{
atomicAdd(&smemClasses.done, 1);
}
}
__syncthreads();
#if DUBUG_KERNEL
// print info
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done);
printf("batch:%d, Total num:%d, startPos:\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClasses.refIdx[k] != smemClasses.endIdx[k])
{
printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k],
smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]);
}
}
printf("\n");
}
__syncthreads();
#endif
    // loop until every class has exhausted its range (tracked via smemClasses.done)
while (smemClasses.done < NClass)
{
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
int refIdx = -1;
int endIdx = -1;
if (curIdx < validSamples && smemClasses.markSamples[curIdx])
{
if (label[ite] >= 0)
{
refIdx = smemClasses.refIdx[label[ite]];
endIdx = smemClasses.endIdx[label[ite]];
if (curIdx > refIdx && curIdx < endIdx)
{
BBox refBox = smemClasses.refBox[label[ite]];
if (boxIoU(refBox, curBox[ite]) > (DType) nmsThreshold)
{
smemClasses.markSamples[curIdx] = false;
}
}
}
}
}
__syncthreads();
        // advance refIdx/refBox of each class to the next unsuppressed sample;
        // only one thread per class advances its own refIdx, the remaining threads stay idle
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int refIdx = smemClasses.refIdx[i];
int endIdx = smemClasses.endIdx[i];
if (refIdx < endIdx)
{
do
{
++refIdx;
} while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false);
smemClasses.refIdx[i] = refIdx;
if (refIdx < endIdx)
{
smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
else
{
atomicAdd(&smemClasses.done, 1);
}
}
}
__syncthreads();
}
    // only the keep/suppress flags of the valid samples need to be written out
for (int segment = 0; segment < validSamples; segment += blockDim.x)
{
int curIdx = segment + threadIdx.x;
if (curIdx < validSamples)
{
outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 1 : 0);
}
}
}
template <int Threads = 256, int ItemsPerThreads = 4>
__global__ void PerClassNMS_half_kernel(
// int N,
int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr,
// const void *inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr,
void* outFlagSamplesPtr)
{
typedef BBoxT<__half> BBox;
__shared__ struct
{
BBox refBox[MaxClassNum];
int endIdx[MaxClassNum];
int refIdx[MaxClassNum + 1];
bool markSamples[Threads * ItemsPerThreads];
int done;
} smemClasses;
assert(NClass + 1 < MaxClassNum);
assert(samples <= Threads * ItemsPerThreads);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
// const DType *inScore = static_cast<const DType *>(inScorePtr);
const __half* inLabel = static_cast<const __half*>(inLabelPtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* classStarts = static_cast<const int*>(classStartsPtr);
int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
if (threadIdx.x == 0)
{
smemClasses.done = 0;
}
BBox curBox[ItemsPerThreads];
int label[ItemsPerThreads];
#pragma unroll
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples)
{
label[ite] = __half2int_rd(inLabel[blockOffset + curIdx]);
curBox[ite] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]);
}
else
{
label[ite] = -1;
}
smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true);
}
int classBlockOffset = N * (NClass + 1);
for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x)
{
int refIdx = classStarts[classBlockOffset + i];
smemClasses.refIdx[i] = refIdx;
smemClasses.refBox[i] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
__syncthreads();
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int endIdx = smemClasses.refIdx[i + 1];
smemClasses.endIdx[i] = endIdx;
if (endIdx == smemClasses.refIdx[i])
{
atomicAdd(&smemClasses.done, 1);
}
}
__syncthreads();
#if DUBUG_KERNEL
// print info
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done);
printf("batch:%d, Total num:%d, startPos:\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClasses.refIdx[k] != smemClasses.endIdx[k])
{
printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k],
smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]);
}
}
printf("\n");
}
__syncthreads();
#endif
    // loop until every class has exhausted its range (tracked via smemClasses.done)
while (smemClasses.done < NClass)
{
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
int refIdx = -1;
int endIdx = -1;
if (curIdx < validSamples && smemClasses.markSamples[curIdx])
{
if (label[ite] >= 0)
{
refIdx = smemClasses.refIdx[label[ite]];
endIdx = smemClasses.endIdx[label[ite]];
if (curIdx > refIdx && curIdx < endIdx)
{
BBox refBox_half = smemClasses.refBox[label[ite]];
BBox curBox_half = curBox[ite];
BBoxT<float> refBox;
BBoxT<float> curBox_float;
refBox.y1 = __half2float(refBox_half.y1);
refBox.x1 = __half2float(refBox_half.x1);
refBox.y2 = __half2float(refBox_half.y2);
refBox.x2 = __half2float(refBox_half.x2);
curBox_float.y1 = __half2float(curBox_half.y1);
curBox_float.x1 = __half2float(curBox_half.x1);
curBox_float.y2 = __half2float(curBox_half.y2);
curBox_float.x2 = __half2float(curBox_half.x2);
if (boxIoU<float>(refBox, curBox_float) > nmsThreshold)
{
smemClasses.markSamples[curIdx] = false;
}
}
}
}
}
__syncthreads();
        // advance refIdx/refBox of each class to the next unsuppressed sample;
        // only one thread per class advances its own refIdx, the remaining threads stay idle
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int refIdx = smemClasses.refIdx[i];
int endIdx = smemClasses.endIdx[i];
if (refIdx < endIdx)
{
do
{
++refIdx;
} while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false);
smemClasses.refIdx[i] = refIdx;
if (refIdx < endIdx)
{
smemClasses.refBox[i] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
else
{
atomicAdd(&smemClasses.done, 1);
}
}
}
__syncthreads();
}
    // only the keep/suppress flags of the valid samples need to be written out
for (int segment = 0; segment < validSamples; segment += blockDim.x)
{
int curIdx = segment + threadIdx.x;
if (curIdx < validSamples)
{
outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 1 : 0);
}
}
}
// TopKGather
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : = divUp(samples, Threads)
// outDetectionCount : int [N], must be set 0 before kernel
#define MaxItemsPerThreads 8
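// cub::BlockRadixSort requires ITEMS_PER_THREAD as a compile-time constant, so one specialization
// is instantiated for every possible per-thread item count (1..MaxItemsPerThreads) and the count
// computed at run time from validSamples selects the matching instance via the switch below.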
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGatherProposal_kernel(int samples, int keepTopK, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr,
const void* inFlagSamplesPtr, void* outBboxPtr)
{
typedef BBoxT<BoxType> BBox;
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
__shared__ union
{
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
BBox* outBbox = static_cast<BBox*>(outBboxPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
if (curI < finalTopK && idx[i] >= 0 && float(score[i]) > MinValidScore)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
}
((BBox*) outBbox)[outBlockOffset + curI] = oB;
}
}
}
#define MaxItemsPerThreads 8
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGather_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr,
void* outDetectionPtr)
{
typedef BBoxT<BoxType> BBox;
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
__shared__ union
{
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); // InLabel keeps INT32
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
DType* outDetections = static_cast<DType*>(outDetectionPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
DType oS = 0.0f;
BoxType oL = -1;
if (curI < finalTopK && idx[i] >= 0 && float(score[i]) > MinValidScore)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
oS = score[i];
oL = (BoxType) inLabel[blockOffset + idx[i]];
}
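            // Each kept detection is written as 6 consecutive values: [y1, x1, y2, x2, label, score].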
outDetections[(outBlockOffset + curI) * 6] = oB.y1;
outDetections[(outBlockOffset + curI) * 6 + 1] = oB.x1;
outDetections[(outBlockOffset + curI) * 6 + 2] = oB.y2;
outDetections[(outBlockOffset + curI) * 6 + 3] = oB.x2;
outDetections[(outBlockOffset + curI) * 6 + 4] = oL;
outDetections[(outBlockOffset + curI) * 6 + 5] = oS;
}
}
}
RefineDetectionWorkSpace::RefineDetectionWorkSpace(
const int batchSize, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType)
: argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount + 1, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
// resource
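    // The workspace is one linear device buffer: every *Offset member marks the start of an
    // AlignMem-aligned sub-buffer, and sumSize accumulates the total byte count stored in totalSize.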
    // argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
ProposalWorkSpace::ProposalWorkSpace(const int batchSize, const int inputCnt, const int sampleCount,
const RefineNMSParameters& param, const nvinfer1::DataType inType)
: preRefineScoreDims(inputCnt, 1)
, preRefineSortedScoreDims(inputCnt, 1)
, preRefineBboxDims(inputCnt, 4)
, argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
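    // (1 << 23) bytes (8 MiB) per batch item are reserved as cub temp storage for the segmented
    // score sort; proposalRefineBatchClassNMS later asserts that the queried size fits this budget.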
// preRefineScore : [N, inputcnt, 1] // extracted foreground score from inputs[0]
preRefineScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineScoreDims) * typeSize(type) * batchSize);
// preRefineSortedScore: [N, inputcnt, 1]
preRefineSortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize);
// preRefineBbox: [N, inputcnt, 4] // sorted bbox
preRefineBboxOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize);
    // argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
MultilevelProposeROIWorkSpace::MultilevelProposeROIWorkSpace(const int batchSize, const int inputCnt,
const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType)
: preRefineSortedScoreDims(inputCnt, 1)
, preRefineBboxDims(inputCnt, 4)
, argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount + 1, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = inType;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
// preRefineSortedScore: [N, inputcnt, 1]
preRefineSortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize);
// preRefineBbox: [N, inputcnt, 4] // sorted bbox
preRefineBboxOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize);
// argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(type) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
ConcatTopKWorkSpace::ConcatTopKWorkSpace(
const int batchSize, const int concatCnt, const int topK, const nvinfer1::DataType inType)
: concatedScoreDims(concatCnt * topK, 1)
, concatedBBoxDims(concatCnt * topK, 4)
, sortedScoreDims(concatCnt * topK, 1)
, sortedBBoxDims(concatCnt * topK, 4)
{
size_t sumSize = 0;
// const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
const nvinfer1::DataType type = inType;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
// concatedScoreOffset: [N, concatCnt*topK, 1]
concatedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(concatedScoreDims) * typeSize(type) * batchSize);
// concatedBBoxOffset: [N, concatCnt*topK, 4]
concatedBBoxOffset = sumSize;
sumSize += AlignMem(dimVolume(concatedBBoxDims) * typeSize(type) * batchSize);
// sortedScoreOffset: [N, concatCnt * topK, 1]
sortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(sortedScoreDims) * typeSize(type) * batchSize);
// sortedBBoxOffset: [N, concatCnt * topK, 4]
sortedBBoxOffset = sumSize;
sumSize += AlignMem(dimVolume(sortedBBoxDims) * typeSize(type) * batchSize);
totalSize = sumSize;
}
template <int Threads>
hipError_t argMaxGroup(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox)
{
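    // 2-D launch: gridDim.x = min(samples, 512 / N) aligned down to a multiple of 32 (at least 1);
    // gridDim.y spans the batch.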
int gridX = nAlignDown(dMIN(samples, 512 / N), 32);
gridX = dMAX(gridX, 1);
dim3 gridDim = {static_cast<unsigned int>(gridX), static_cast<unsigned int>(N), 1};
dim3 threads = {Threads, 1, 1};
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
hipLaunchKernelGGL(( argMaxGroup_kernel<float, float, Threads>), dim3(gridDim), dim3(threads), 0, stream,
samples, 0, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox);
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return hipGetLastError();
}
template <int Threads>
hipError_t argMaxWOBackground(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox)
{
int gridX = nAlignDown(dMIN(samples, 512 / N), 32);
gridX = dMAX(gridX, 1);
dim3 gridDim = {static_cast<unsigned int>(gridX), static_cast<unsigned int>(N), 1};
dim3 threads = {Threads, 1, 1};
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
hipLaunchKernelGGL(( argMaxGroup_kernel<float, float, Threads>), dim3(gridDim), dim3(threads), 0, stream,
samples, 1, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox);
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return hipGetLastError();
}
template <int Threads, int ItemsPerThreads>
hipError_t sortPerClass(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, int background,
float scoreThreshold, const void* inSampleValidCount, const void* inScorePtr, const void* inLabelPtr,
const void* inBboxPtr, void* outclassStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr,
void* outValidSampleCountPtr)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
        hipLaunchKernelGGL(( sortPerClass_kernel<float, float, Threads, ItemsPerThreads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass,
background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr,
outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr);
break;
case nvinfer1::DataType::kHALF:
        hipLaunchKernelGGL(( sortPerClass_kernel_half<Threads, ItemsPerThreads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass,
background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr,
outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr);
break;
default: assert(false);
}
return hipGetLastError();
}
template <int Threads>
hipError_t PerClassNMS(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const float nmsThreshold, const void* validSampleCount,
// const void *inScore,
const void* inLabel, const void* inBbox, const void* inBboxRefIdx, const void* classStarts, void* outFlagSamples)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
hipLaunchKernelGGL(( PerClassNMS_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass, nmsThreshold,
validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples);
break;
case nvinfer1::DataType::kHALF:
hipLaunchKernelGGL(( PerClassNMS_half_kernel<Threads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass, nmsThreshold,
validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples);
break;
default: assert(false);
}
return hipGetLastError();
}
template <int Threads>
hipError_t KeepTopKGather(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK,
const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr,
const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetections, int proposal)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
if (proposal)
{
hipLaunchKernelGGL(( TopKGatherProposal_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
else
{
hipLaunchKernelGGL(( TopKGather_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return hipGetLastError();
}
// TopKGather For TLT RPN Proposal
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : = divUp(samples, Threads)
// outDetectionCount : int [N], must be set 0 before kernel
#define MaxItemsPerThreads 8
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGatherBoxScore_kernel(int samples, int keepTopK, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr,
const void* inFlagSamplesPtr, void* outScorePtr, void* outBboxPtr)
{
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
__shared__ union {
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
typedef BBoxT<BoxType> BBox;
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
BBox* outBbox = static_cast<BBox*>(outBboxPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
DType oS = 0.0f;
if (curI < finalTopK && idx[i] >= 0)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
oS = score[i];
}
((BBox*) outBbox)[outBlockOffset + curI] = oB;
outScore[outBlockOffset + curI] = oS;
}
}
}
template <int Threads>
hipError_t KeepTopKGatherBoxScore(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK,
const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr,
const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScores, void* outDetections, int proposal)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
if (proposal)
{
hipLaunchKernelGGL(( TopKGatherBoxScore_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores,
outDetections);
}
else
{
hipLaunchKernelGGL(( TopKGather_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
case nvinfer1::DataType::kHALF:
if (proposal)
{
hipLaunchKernelGGL(( TopKGatherBoxScore_kernel<__half, __half, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores,
outDetections);
}
else
{
hipLaunchKernelGGL(( TopKGather_kernel<__half, __half, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
default: assert(false);
}
return hipGetLastError();
}
hipError_t RefineBatchClassNMS(hipStream_t stream, int N, int samples, nvinfer1::DataType dtype,
const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace,
const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections)
{
int NClass = param.numClasses;
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset;
hipError_t status = hipSuccess;
CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
if (NClass > 1)
{ // multiple classes
status = argMaxGroup<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr,
argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes
assert(status == hipSuccess);
CUASSERT(status);
}
else
{ // Only one class
argMaxScorePtr = const_cast<void*>(inScores);
argMaxBBoxPtr = const_cast<void*>(inDelta);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
status = ApplyDelta2Bboxes(stream, N, samples, inROI, argMaxBBoxPtr, argMaxBBoxPtr);
assert(status == hipSuccess);
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return hipErrorLaunchFailure;
}
assert(status == hipSuccess);
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
assert(status == hipSuccess);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0);
assert(status == hipSuccess);
CUASSERT(status);
return status;
}
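// Illustrative usage sketch (not part of this file; the caller-side buffer names dScores, dDeltas,
// dValidCnt, dROIs and dDetections are hypothetical). A caller would typically size the workspace
// from RefineDetectionWorkSpace::totalSize and run the whole pipeline on one stream:
//
//   RefineDetectionWorkSpace refineOffset(batchSize, sampleCount, param, nvinfer1::DataType::kFLOAT);
//   void* workspace = nullptr;
//   CUASSERT(hipMalloc(&workspace, refineOffset.totalSize));
//   CUASSERT(RefineBatchClassNMS(stream, batchSize, sampleCount, nvinfer1::DataType::kFLOAT, param,
//       refineOffset, workspace, dScores, dDeltas, dValidCnt, dROIs, dDetections));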
hipError_t DetectionPostProcess(hipStream_t stream, int N, int samples, const float* regWeight,
const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param,
const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta,
const void* inCountValid, const void* inROI, void* outDetections)
{
int NClass = param.numClasses;
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset;
hipError_t status = hipSuccess;
CUASSERT(hipMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream));
CUASSERT(hipMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4 * sizeof(float), stream));
CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
CUASSERT(hipMemsetAsync(sortClassPosPtr, 0, N * (NClass + 1) * sizeof(int), stream));
CUASSERT(hipMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream));
if (NClass > 1)
{ // multiple classes
status = argMaxWOBackground<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid,
argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes
assert(status == hipSuccess);
CUASSERT(status);
}
else
{ // Only one class
argMaxScorePtr = const_cast<void*>(inScores);
argMaxBBoxPtr = const_cast<void*>(inDelta);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
status = DecodeBBoxes(stream, N, samples, regWeight, inputHeight, inputWidth, inROI, argMaxBBoxPtr, argMaxBBoxPtr, dtype);
assert(status == hipSuccess);
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return hipErrorLaunchFailure;
}
assert(status == hipSuccess);
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0);
CUASSERT(status);
return status;
}
struct BF_SCORE
{
float bg, fg;
};
// in_scores : [N, samples, 2]
// output_score : [N, samples, 1]
__global__ void extract_fg_kernel(int samples, const void* in_scores, void* output_score)
{
const BF_SCORE* in = static_cast<const BF_SCORE*>(in_scores);
float* out = static_cast<float*>(output_score);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
out[blockOffset + cur_id] = in[blockOffset + cur_id].fg;
}
}
}
__global__ void set_offset_kernel(int stride, int size, int* output)
{
// One block, because batch size shouldn't be too large.
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
output[i] = i * stride;
}
}
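// resample_kernel: per batch item, copies the first min(sample_size, orig_size) score/box pairs
// from the sorted input buffers into the fixed-size sampled output buffers.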
template <typename Dtype>
__global__ void resample_kernel(int orig_size, int sample_size, const void* orig_score_ptr, const void* orig_bbox_ptr,
void* sampled_score_ptr, void* sampled_bbox_ptr)
{
const Dtype* in_score = static_cast<const Dtype*>(orig_score_ptr);
const BBoxT<Dtype>* in_bbox = static_cast<const BBoxT<Dtype>*>(orig_bbox_ptr);
Dtype* out_score = static_cast<Dtype*>(sampled_score_ptr);
BBoxT<Dtype>* out_bbox = static_cast<BBoxT<Dtype>*>(sampled_bbox_ptr);
int N = blockIdx.x;
int blockOffset_in = N * orig_size;
int blockOffset_out = N * sample_size;
int realSampleCnt = dMIN(sample_size, orig_size);
int totalItems = (realSampleCnt + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < realSampleCnt)
{
out_score[blockOffset_out + cur_id] = in_score[blockOffset_in + cur_id];
out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id];
}
}
}
hipError_t proposalRefineBatchClassNMS(hipStream_t stream, int N, int inputCnt, int samples, nvinfer1::DataType dtype,
const RefineNMSParameters& param, const ProposalWorkSpace& proposalOffset, void* workspace,
const void* inScores, //[N, inputcnt, 2]
const void* inDelta, //[N, inputcnt, 4]
const void* inCountValid,
const void* inAnchors, //[N, inputcnt, 4]
void* outProposals)
{
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset;
void* preRefineScorePtr = wsPtr + proposalOffset.preRefineScoreOffset;
void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset;
void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset;
void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset;
hipError_t status = hipSuccess;
CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
// extract foreground score
hipLaunchKernelGGL(( extract_fg_kernel), dim3(N), dim3(dMIN(inputCnt, 1024)), 0, stream, inputCnt, inScores, preRefineScorePtr);
CUASSERT(hipGetLastError());
    // Here, inDelta is converted in place to normalized coordinates based on the anchors
status = ApplyDelta2Bboxes(stream, N, inputCnt, inAnchors, inDelta, const_cast<void*>(inDelta));
CUASSERT(status);
// sort the score
// d_key_in: preRefineScorePtr [N, inputCnt, 1]
// d_key_out: preRefineSortedScorePtr
// d_values_in: inDelta [N, inputCnt, 4]
// d_values_out: preRefineBboxPtr
// num_items: inputCnt*N
// num_segments: N
// offsets: [0, inputCnt, inputCnt*2, ..., ]
int* offsets = static_cast<int*>(tempStoragePtr);
hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, inputCnt, N + 1, offsets);
assert(hipGetLastError() == hipSuccess);
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
size_t temp_storage_bytes = 0;
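    // Standard two-phase cub call: the first invocation with a null workspace pointer only queries
    // temp_storage_bytes; the second invocation performs the segmented descending sort.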
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) preRefineScorePtr,
(float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N,
offsets, offsets + 1, 0, 8 * sizeof(float), stream);
assert((1 << 23) * (size_t) N > temp_storage_bytes);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) preRefineScorePtr,
(float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N,
offsets, offsets + 1, 0, 8 * sizeof(float), stream);
int NClass = param.numClasses;
assert(NClass == 1);
if (NClass == 1)
{ // Only one class
hipLaunchKernelGGL(( resample_kernel<float>), dim3(N), dim3(dMIN(samples, 1024)), 0, stream,
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return hipErrorLaunchFailure;
}
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outProposals, 1);
CUASSERT(status);
return status;
}
template<typename Dtype>
void score_bbox_cub_sort(void* tempStorage,
const void* inScore,
void* sortedScore,
const void* inBBox,
void* sortedBBox,
int totalCnt,
int segCnt,
int* offsets,
hipStream_t stream
)
{
size_t temp_storage_bytes = 0;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (Dtype*) inScore,
(Dtype*) sortedScore, (BBoxT<Dtype>*) inBBox, (BBoxT<Dtype>*) sortedBBox, totalCnt, segCnt,
offsets, offsets + 1, 0, 8 * sizeof(Dtype), stream);
CUASSERT(hipGetLastError());
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(tempStorage, temp_storage_bytes, (Dtype*) inScore,
(Dtype*) sortedScore, (BBoxT<Dtype>*) inBBox, (BBoxT<Dtype>*) sortedBBox, totalCnt, segCnt,
offsets, offsets + 1, 0, 8 * sizeof(Dtype), stream);
CUASSERT(hipGetLastError());
}
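// Worked example of the segmented-sort layout used above (illustrative only):
// with segCnt = 2 images and 4 proposals each, offsets = [0, 4, 8] and
//   inScore = [.2 .9 .5 .1 | .3 .3 .8 .6]
// sorts to
//   sortedScore = [.9 .5 .2 .1 | .8 .6 .3 .3]
// with each score's BBoxT moved to the matching position. The bit range
// [0, 8*sizeof(Dtype)) asks cub to sort on the full key width.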
hipError_t MultilevelPropose(hipStream_t stream, int N, int inputCnt, int samples, const float* regWeight,
const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param,
const MultilevelProposeROIWorkSpace& proposalOffset, void* workspace,
const void* inScore, //[N, inputcnt, 1]
const void* inDelta, //[N, inputcnt, 4]
void* inCountValid,
const void* inAnchors, //[N, inputcnt, 4]
void* outScore, void* outBbox)
{
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset;
void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset;
void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset;
void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset;
hipError_t status = hipSuccess;
int NClass = param.numClasses;
assert(NClass == 1);
CUASSERT(hipMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(dtype), stream));
CUASSERT(hipMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4 * sizeof(dtype), stream));
CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
CUASSERT(hipMemsetAsync(sortClassPosPtr, 0, N * (NClass + 1) * sizeof(int), stream));
CUASSERT(hipMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream));
CUASSERT(hipGetLastError());
    // Here, inDelta is decoded into absolute box coordinates based on the anchors
status = DecodeBBoxes(
stream, N, inputCnt, regWeight, inputHeight, inputWidth, inAnchors, inDelta, const_cast<void*>(inDelta), dtype);
CUASSERT(hipGetLastError());
// sort the score
// d_key_in: preRefineScorePtr [N, inputCnt, 1]
// d_key_out: preRefineSortedScorePtr
// d_values_in: inDelta [N, inputCnt, 4]
// d_values_out: preRefineBboxPtr
// num_items: inputCnt*N
// num_segments: N
// offsets: [0, inputCnt, inputCnt*2, ..., ]
int* offsets = static_cast<int*>(tempStoragePtr);
hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, inputCnt, N + 1, offsets);
CUASSERT(hipGetLastError());
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
score_bbox_cub_sort<float>(tempStoragePtr, inScore, preRefineSortedScorePtr,
inDelta, preRefineBboxPtr, N * inputCnt, N,
offsets, stream);
break;
}
case nvinfer1::DataType::kHALF:
{
score_bbox_cub_sort<__half>(tempStoragePtr, inScore, preRefineSortedScorePtr,
inDelta, preRefineBboxPtr, N * inputCnt, N,
offsets, stream);
break;
}
default: assert(false);
}
if (NClass == 1)
{ // Only one class
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( resample_kernel<float>), dim3(N), dim3(dMIN(samples, 1024)), 0, stream,
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
CUASSERT(hipGetLastError());
break;
}
case nvinfer1::DataType::kHALF:
{
hipLaunchKernelGGL(( resample_kernel<__half>), dim3(N), dim3(dMIN(samples, 1024)), 0, stream,
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
CUASSERT(hipGetLastError());
break;
}
default: assert(false);
}
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0);
CUASSERT(hipGetLastError());
break;
}
case nvinfer1::DataType::kHALF: {
hipLaunchKernelGGL(( resetMemValue_kernel<__half>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0);
CUASSERT(hipGetLastError());
break;
}
default: assert(false);
}
}
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return hipErrorLaunchFailure;
}
CUASSERT(hipGetLastError());
status = PerClassNMS<1024>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(hipGetLastError());
status = KeepTopKGatherBoxScore<512>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr,
sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outScore, outBbox,
1);
CUASSERT(hipGetLastError());
return status;
}
struct BBOX
{
float y1, x1, y2, x2;
};
struct DELTA
{
float dy, dx, logdh, logdw;
};
struct DELTA_HALF
{
__half dy, dx, logdh, logdw;
};
__global__ void decode_bboxes_kernel(int samples, const void* anchors, const void* delta, const float* regWeight,
const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh)
{
const BBOX* anchors_in = static_cast<const BBOX*>(anchors);
const DELTA* delta_in = static_cast<const DELTA*>(delta);
BBOX* bbox_out = static_cast<BBOX*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
// convert yxyx -> cyxhw
// cy, cx, h, w
/*BBOX cur_anchor_cyxhw;*/
float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0);
float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); // w
float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5; // cy
float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5; // cx
DELTA cur_delta = delta_in[blockOffset + cur_id];
// divided by regWeight
cur_delta.dy /= regWeight[0];
cur_delta.dx /= regWeight[1];
cur_delta.logdh /= regWeight[2];
cur_delta.logdw /= regWeight[3];
cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh);
cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh);
// apply delta
float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h;
float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w;
float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h;
float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w;
float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h;
float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w;
float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0;
float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0;
            // clip bbox: a more precise clipping method based on the real image window could be implemented
decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0);
decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0);
decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0);
decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = decoded_box_ymin;
bbox_out[blockOffset + cur_id].x1 = decoded_box_xmin;
bbox_out[blockOffset + cur_id].y2 = decoded_box_ymax;
bbox_out[blockOffset + cur_id].x2 = decoded_box_xmax;
}
}
}
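// Worked example for decode_bboxes_kernel (illustrative numbers): anchor
// (y1,x1,y2,x2) = (10,10,50,50) gives h = w = 41 and center (30.5, 30.5).
// With deltas (after the regWeight division) dy = dx = 0 and logdh = logdw = ln(2),
// the decoded box keeps its center and doubles its size: h = w = 82, i.e.
// (-10.5,-10.5,70.5,70.5) before clipping; clipping then snaps the negative
// corners to 0 and caps the others at (inputHeight-1, inputWidth-1).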
__global__ void decode_bboxes_kernel_half(int samples, const void* anchors, const void* delta, const float* regWeight,
const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh)
{
const BBoxT<float>* anchors_in = static_cast<const BBoxT<float>*>(anchors);
const DELTA_HALF* delta_in = static_cast<const DELTA_HALF*>(delta);
BBoxT<__half>* bbox_out = static_cast<BBoxT<__half>*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBoxT<float> cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
// convert yxyx -> cyxhw
// cy, cx, h, w
float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0);
float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); // w
float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5; // cy
float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5; // cx
DELTA_HALF cur_delta_half = delta_in[blockOffset + cur_id];
DELTA cur_delta;
cur_delta.dy = __half2float(cur_delta_half.dy);
cur_delta.dx = __half2float(cur_delta_half.dx);
cur_delta.logdh = __half2float(cur_delta_half.logdh);
cur_delta.logdw = __half2float(cur_delta_half.logdw);
// divided by regWeight
cur_delta.dy /= regWeight[0];
cur_delta.dx /= regWeight[1];
cur_delta.logdh /= regWeight[2];
cur_delta.logdw /= regWeight[3];
cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh);
cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh);
// apply delta
float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h;
float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w;
float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h;
float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w;
float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h;
float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w;
float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0;
float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0;
            // clip bbox: a more precise clipping method based on the real image window could be implemented
decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0);
decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0);
decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0);
decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = __float2half(decoded_box_ymin);
bbox_out[blockOffset + cur_id].x1 = __float2half(decoded_box_xmin);
bbox_out[blockOffset + cur_id].y2 = __float2half(decoded_box_ymax);
bbox_out[blockOffset + cur_id].x2 = __float2half(decoded_box_xmax);
}
}
}
hipError_t DecodeBBoxes(hipStream_t stream, int N,
int samples, // number of anchors per image
const float* regWeight, const float inputHeight, const float inputWidth,
const void* anchors, // [N, anchors, (y1, x1, y2, x2)]
const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)])
void* outputBbox, //[N, anchors, (y1, x1, y2, x2)]
nvinfer1::DataType dtype
)
{
int blocks = N;
int threads = dMIN(samples, 1024);
    // deltas are first divided by regWeight, then applied to the anchors:
    //  cy = anchor_cy + dy*anchor_h
    //  cx = anchor_cx + dx*anchor_w
    //  h = exp(logdh)*anchor_h
    //  w = exp(logdw)*anchor_w
    // finally the bbox is clipped to the input image in absolute coordinates
float bboxClipThresh = log(1000.0f / 16.0f);
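    // log(1000/16) ~= 4.135, so exp(logdh/logdw) is capped at 62.5x the anchor
    // size; this appears to match the Detectron-style BBOX_XFORM_CLIP constant
    // used for box decoding.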
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( decode_bboxes_kernel), dim3(blocks), dim3(threads), 0, stream,
samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh);
break;
}
case nvinfer1::DataType::kHALF:
{
hipLaunchKernelGGL(( decode_bboxes_kernel_half), dim3(blocks), dim3(threads), 0, stream,
samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh);
break;
}
default: assert(false);
}
return hipGetLastError();
}
__global__ void apply_delta_kernel(int samples, const void* anchors, const void* delta, void* outputBbox)
{
const BBOX* anchors_in = static_cast<const BBOX*>(anchors);
const DELTA* delta_in = static_cast<const DELTA*>(delta);
BBOX* bbox_out = static_cast<BBOX*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
// convert yxyx -> cyxhw
// cy, cx, h, w
BBOX cur_anchor_cyxhw;
cur_anchor_cyxhw.y1 = (cur_anchor_yxyx.y1 + cur_anchor_yxyx.y2) / 2;
cur_anchor_cyxhw.x1 = (cur_anchor_yxyx.x1 + cur_anchor_yxyx.x2) / 2;
cur_anchor_cyxhw.y2 = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1);
cur_anchor_cyxhw.x2 = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1);
DELTA cur_delta = delta_in[blockOffset + cur_id];
            // multiply by the bbox std dev (dy, dx: 0.1; logdh, logdw: 0.2)
cur_delta.dy *= 0.1;
cur_delta.dx *= 0.1;
cur_delta.logdh *= 0.2;
cur_delta.logdw *= 0.2;
// apply delta
cur_anchor_cyxhw.y1 += cur_delta.dy * cur_anchor_cyxhw.y2;
cur_anchor_cyxhw.x1 += cur_delta.dx * cur_anchor_cyxhw.x2;
cur_anchor_cyxhw.y2 *= expf(cur_delta.logdh);
cur_anchor_cyxhw.x2 *= expf(cur_delta.logdw);
cur_anchor_yxyx.y1 = cur_anchor_cyxhw.y1 - 0.5 * cur_anchor_cyxhw.y2;
cur_anchor_yxyx.x1 = cur_anchor_cyxhw.x1 - 0.5 * cur_anchor_cyxhw.x2;
cur_anchor_yxyx.y2 = cur_anchor_yxyx.y1 + cur_anchor_cyxhw.y2;
cur_anchor_yxyx.x2 = cur_anchor_yxyx.x1 + cur_anchor_cyxhw.x2;
            // clip bbox: a more precise clipping method based on the real image window could be implemented
cur_anchor_yxyx.y1 = dMAX(dMIN(cur_anchor_yxyx.y1, 1.0), 0.0);
cur_anchor_yxyx.x1 = dMAX(dMIN(cur_anchor_yxyx.x1, 1.0), 0.0);
cur_anchor_yxyx.y2 = dMAX(dMIN(cur_anchor_yxyx.y2, 1.0), 0.0);
cur_anchor_yxyx.x2 = dMAX(dMIN(cur_anchor_yxyx.x2, 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = cur_anchor_yxyx.y1;
bbox_out[blockOffset + cur_id].x1 = cur_anchor_yxyx.x1;
bbox_out[blockOffset + cur_id].y2 = cur_anchor_yxyx.y2;
bbox_out[blockOffset + cur_id].x2 = cur_anchor_yxyx.x2;
}
}
}
hipError_t ApplyDelta2Bboxes(hipStream_t stream, int N,
int samples, // number of anchors per image
const void* anchors, // [N, anchors, (y1, x1, y2, x2)]
const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)])
void* outputBbox //[N, anchors, (y1, x1, y2, x2)]
)
{
int blocks = N;
int threads = dMIN(samples, 1024);
    // deltas are multiplied by the bbox std dev (0.1, 0.1, 0.2, 0.2), then applied to the anchors:
    //  cy = anchor_cy + dy*anchor_h
    //  cx = anchor_cx + dx*anchor_w
    //  h = exp(logdh)*anchor_h
    //  w = exp(logdw)*anchor_w
    // clip the bbox to normalized [0, 1] coordinates
hipLaunchKernelGGL(( apply_delta_kernel), dim3(blocks), dim3(threads), 0, stream, samples, anchors, delta, outputBbox);
return hipGetLastError();
}
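// Note: unlike DecodeBBoxes above, ApplyDelta2Bboxes assumes normalized [0, 1]
// box coordinates, hard-codes the bbox std dev (0.1, 0.1, 0.2, 0.2) instead of
// taking regWeight, and does not clamp logdh/logdw before exp().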
template <typename Tfeat>
__device__ inline Tfeat interpolateBilinear(const Tfeat* src, xy_t srcDims, float y, float x)
{
const int y0 = static_cast<int>(y);
const float yAlpha = y - static_cast<float>(y0);
const int x0 = static_cast<int>(x);
const float xAlpha = x - static_cast<float>(x0);
assert(y0 < srcDims.y);
assert(x0 < srcDims.x);
const int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil
const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil
assert(y1 < srcDims.y);
assert(x1 < srcDims.x);
const Tfeat src00 = src[(y0) *srcDims.x + (x0)];
const Tfeat src01 = src[(y0) *srcDims.x + (x1)];
const Tfeat src10 = src[(y1) *srcDims.x + (x0)];
const Tfeat src11 = src[(y1) *srcDims.x + (x1)];
const Tfeat src0 = src00 * (1.0 - xAlpha) + src01 * xAlpha;
const Tfeat src1 = src10 * (1.0 - xAlpha) + src11 * xAlpha;
return src0 * (1.0 - yAlpha) + src1 * yAlpha;
}
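// Bilinear interpolation, written out: with y = y0 + yAlpha and x = x0 + xAlpha,
//   value = (1-yAlpha)*((1-xAlpha)*src00 + xAlpha*src01)
//         +    yAlpha *((1-xAlpha)*src10 + xAlpha*src11)
// e.g. src00=1, src01=2, src10=3, src11=4 at (yAlpha, xAlpha) = (0.5, 0.5) gives 2.5.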
template <>
__device__ inline __half interpolateBilinear(const __half* src, xy_t srcDims, float y, float x)
{
const int y0 = static_cast<int>(y);
const float yAlpha = y - static_cast<float>(y0);
const int x0 = static_cast<int>(x);
const float xAlpha = x - static_cast<float>(x0);
assert(y0 < srcDims.y);
assert(x0 < srcDims.x);
const int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil
const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil
assert(y1 < srcDims.y);
assert(x1 < srcDims.x);
const __half src00 = src[(y0) *srcDims.x + (x0)];
const __half src01 = src[(y0) *srcDims.x + (x1)];
const __half src10 = src[(y1) *srcDims.x + (x0)];
const __half src11 = src[(y1) *srcDims.x + (x1)];
const __half src0 = add_fb(mul_fb(src00, (1.0 - xAlpha)), mul_fb(src01, xAlpha));
const __half src1 = add_fb(mul_fb(src10, (1.0 - xAlpha)), mul_fb(src11, xAlpha));
return add_fb(mul_fb(src0, (1.0 - yAlpha)), mul_fb(src1, yAlpha));
}
template <typename Trois, typename Tfeat>
__global__ void roiAlign_kernel(int featureCount, int roiCount,
float threshold, const Trois* rois,
const Tfeat* P2, const xy_t P2dims, const Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims,
const Tfeat* P5, const xy_t P5dims,
Tfeat* pooled, const xy_t poolDims)
{
const int batch = blockIdx.x;
const int feature = blockIdx.y;
for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x)
{
const Trois* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = roi[0];
const float x1 = roi[1];
const float y2 = roi[2];
const float x2 = roi[3];
if (!(0 <= y1 && y1 <= 1 && 0 <= x1 && x1 <= 1 && 0 <= y2 && y2 <= 1 && 0 <= x2 && x2 <= 1 && y1 < y2
&& x1 < x2))
{
continue;
}
const float hw = (y2 - y1) * (x2 - x1);
const Tfeat* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
if (hw > threshold)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold *= 4;
if (hw > threshold)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold *= 4;
if (hw > threshold)
{
src = P5;
srcDims = P5dims;
++iP;
}
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
Tfeat* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature);
const float yStart = y1 * (srcDims.y - 1);
const float xStart = x1 * (srcDims.x - 1);
const float yEnd = y2 * (srcDims.y - 1);
const float xEnd = x2 * (srcDims.x - 1);
const float yDelta = (yEnd - yStart) / (poolDims.y - 1);
const float xDelta = (xEnd - xStart) / (poolDims.x - 1);
for (int yy = 0; yy < poolDims.y; ++yy)
{
const float ySample = min(yStart + yDelta * yy, yEnd);
for (int xx = 0; xx < poolDims.x; ++xx)
{
const float xSample = min(xStart + xDelta * xx, xEnd);
float result = interpolateBilinear(src, srcDims, ySample, xSample);
*dst = result;
dst++;
}
}
}
}
hipError_t roiAlign(hipStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold,
const void* rois, const void* const layers[], const xy_t* layerDims,
void* pooled, const xy_t poolDims)
{
const dim3 blocks(batchSize, featureCount);
const int threads(256);
hipLaunchKernelGGL(( roiAlign_kernel), dim3(blocks), dim3(threads), 0, stream, featureCount, roiCount, firstThreshold,
static_cast<const float*>(rois),
static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1],
static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3],
static_cast<float*>(pooled), poolDims);
return hipGetLastError();
}
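// FPN level selection in roiAlign_kernel, spelled out: with normalized ROI area
// hw and t = firstThreshold,
//   hw <= t        -> P2,   t < hw <= 4t  -> P3,
//   4t < hw <= 16t -> P4,   hw > 16t      -> P5.
// Each level covers a 4x larger area range because the feature stride doubles
// per pyramid level.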
template <typename Trois, typename Tfeat>
__global__ void roiAlignHalfCenter_kernel(int featureCount, int roiCount,
float threshold, int inputHeight, int inputWidth, const void* rois_,
const void* const P2_, const xy_t P2dims, const void* const P3_, const xy_t P3dims, const void* const P4_, const xy_t P4dims,
const void* const P5_, const xy_t P5dims, const void* const P6_, const xy_t P6dims,
void* pooled_, const xy_t poolDims)
{
const Trois* rois = static_cast<const Trois*>(rois_);
const Tfeat* P2 = static_cast<const Tfeat*>(P2_);
const Tfeat* P3 = static_cast<const Tfeat*>(P3_);
const Tfeat* P4 = static_cast<const Tfeat*>(P4_);
const Tfeat* P5 = static_cast<const Tfeat*>(P5_);
const Tfeat* P6 = static_cast<const Tfeat*>(P6_);
Tfeat* pooled = static_cast<Tfeat* >(pooled_);
const int batch = blockIdx.x;
const int feature = blockIdx.y;
const int roiIdx = blockIdx.z;
const int total_item_cnt = poolDims.x * poolDims.y;
for (int itemIdx = threadIdx.x; itemIdx < total_item_cnt; itemIdx += blockDim.x)
{
const Trois* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = roi[0];
const float x1 = roi[1];
const float y2 = roi[2];
const float x2 = roi[3];
if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2
&& x2 <= inputWidth && y1 < y2 && x1 < x2))
{
continue;
}
const float hw = (y2 - y1) * (x2 - x1);
const Tfeat* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
float threshold_per_item = threshold;
if (hw > threshold_per_item)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P5;
srcDims = P5dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P6;
srcDims = P6dims;
++iP;
}
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
Tfeat* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature) + itemIdx;
float scale_to_level = 1.0f;
for (int i = 0; i < iP; i++)
{
scale_to_level *= 2.0f;
}
const float yStart = y1 / scale_to_level;
const float xStart = x1 / scale_to_level;
const float yEnd = y2 / scale_to_level;
const float xEnd = x2 / scale_to_level;
const float yDelta = (yEnd - yStart) / (poolDims.y);
const float xDelta = (xEnd - xStart) / (poolDims.x);
        const int yy = itemIdx / poolDims.x; // row index in the pooled grid (grid width is poolDims.x)
const int xx = itemIdx % poolDims.x;
const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f);
const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f);
Tfeat result = interpolateBilinear<Tfeat>(src, srcDims, ySample, xSample);
*dst = result;
}
}
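// In roiAlignHalfCenter_kernel the selected level index iP (2..6) becomes
// scale_to_level = 2^iP, i.e. the assumed feature strides 4, 8, 16, 32 and 64 for
// P2..P6, and samples are taken at bin centers ((xx + 0.5), (yy + 0.5)), hence the
// "HalfCenter" name. ROI coordinates here are absolute pixels, not normalized.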
template <>
__global__ void roiAlignHalfCenter_kernel<__half, __half>(int featureCount, int roiCount,
float threshold, int inputHeight, int inputWidth, const void* rois_,
const void* const P2_, const xy_t P2dims, const void* const P3_, const xy_t P3dims, const void* const P4_, const xy_t P4dims,
const void* const P5_, const xy_t P5dims, const void* const P6_, const xy_t P6dims,
void* pooled_, const xy_t poolDims)
{
const __half* rois = static_cast<const __half*>(rois_);
const __half* P2 = static_cast<const __half*>(P2_);
const __half* P3 = static_cast<const __half*>(P3_);
const __half* P4 = static_cast<const __half*>(P4_);
const __half* P5 = static_cast<const __half*>(P5_);
const __half* P6 = static_cast<const __half*>(P6_);
__half* pooled = static_cast<__half* >(pooled_);
const int batch = blockIdx.x;
const int feature = blockIdx.y;
const int roiIdx = blockIdx.z;
const int total_item_cnt = poolDims.x * poolDims.y;
for (int itemIdx = threadIdx.x; itemIdx < total_item_cnt; itemIdx += blockDim.x)
{
const __half* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = __half2float(roi[0]);
const float x1 = __half2float(roi[1]);
const float y2 = __half2float(roi[2]);
const float x2 = __half2float(roi[3]);
if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2
&& x2 <= inputWidth && y1 < y2 && x1 < x2))
{
continue;
}
const float hw = (y2 - y1) * (x2 - x1);
const __half* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
float threshold_per_item = threshold;
if (hw > threshold_per_item)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P5;
srcDims = P5dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P6;
srcDims = P6dims;
++iP;
}
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
__half* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature) + itemIdx;
float scale_to_level = 1.0f;
for (int i = 0; i < iP; i++)
{
scale_to_level *= 2.0f;
}
const float yStart = y1 / scale_to_level;
const float xStart = x1 / scale_to_level;
const float yEnd = y2 / scale_to_level;
const float xEnd = x2 / scale_to_level;
const float yDelta = (yEnd - yStart) / (poolDims.y);
const float xDelta = (xEnd - xStart) / (poolDims.x);
        const int yy = itemIdx / poolDims.x; // row index in the pooled grid (grid width is poolDims.x)
const int xx = itemIdx % poolDims.x;
const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f);
const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f);
__half result = interpolateBilinear<__half>(src, srcDims, ySample, xSample);
*dst = result;
}
}
hipError_t roiAlignHalfCenter(hipStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold,
int inputHeight, int inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims,
void* pooled, const xy_t poolDims, const DataType dtype)
{
const dim3 blocks(batchSize, featureCount, roiCount);
const int threads(64);
switch (dtype){
case nvinfer1::DataType::kFLOAT:
{
hipLaunchKernelGGL(( roiAlignHalfCenter_kernel<float, float>), dim3(blocks), dim3(threads), 0, stream, featureCount, roiCount, firstThreshold, inputHeight,
inputWidth, rois, layers[0], layerDims[0],
layers[1], layerDims[1], layers[2], layerDims[2],
layers[3], layerDims[3], layers[4], layerDims[4],
pooled, poolDims);
break;
}
case nvinfer1::DataType::kHALF:
{
hipLaunchKernelGGL(( roiAlignHalfCenter_kernel<__half, __half>), dim3(blocks), dim3(threads), 0, stream, featureCount, roiCount, firstThreshold, inputHeight,
inputWidth, rois, layers[0], layerDims[0],
layers[1], layerDims[1], layers[2], layerDims[2],
layers[3], layerDims[3], layers[4], layerDims[4],
pooled, poolDims);
break;
}
default: assert(false);
}
return hipGetLastError();
}
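// Hypothetical host-side call of roiAlignHalfCenter (illustrative sketch only;
// the device pointers, dimensions and threshold below are made-up assumptions,
// not values taken from the plugin):
#if 0
void exampleRoiAlignHalfCenter(hipStream_t stream, const void* dRois, void* dPooled,
    const void* dP2, const void* dP3, const void* dP4, const void* dP5, const void* dP6)
{
    const void* layers[5] = {dP2, dP3, dP4, dP5, dP6};
    const xy_t layerDims[5] = {{256, 256}, {128, 128}, {64, 64}, {32, 32}, {16, 16}};
    const xy_t poolDims = {7, 7};
    // 1024x1024 input, 256 channels, 1000 ROIs per image, batch of 1
    CUASSERT(roiAlignHalfCenter(stream, 1, 256, 1000, /*firstThreshold=*/4096.f,
        1024, 1024, dRois, layers, layerDims, dPooled, poolDims,
        nvinfer1::DataType::kFLOAT));
}
#endif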
__global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride,
int ibatchstride, float* odata, int ostride, int obatchstride)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = blockIdx.z;
for (int batch = z0; batch < nbatch; batch += gridDim.z)
{
for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y)
{
for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x)
{
int ix = int(ox / scale);
int iy = int(oy / scale);
odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix];
}
}
}
}
void resizeNearest(dim3 grid, dim3 block, hipStream_t stream, int nbatch, float scale, int2 osize, float const* idata,
int istride, int ibatchstride, float* odata, int ostride, int obatchstride)
{
hipLaunchKernelGGL(( resize_nearest_kernel_2d), dim3(grid), dim3(block), 0, stream,
nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride);
}
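// Example of the nearest-neighbor mapping above: with scale = 2, output pixel
// (ox, oy) = (5, 3) reads input pixel (ix, iy) = (int(5/2), int(3/2)) = (2, 1),
// so every input pixel is simply replicated into a 2x2 output block.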
struct BOX
{
float y1, x1, y2, x2;
};
struct DETECTION
{
float y1, x1, y2, x2, class_id, score;
};
__global__ void specialslice_kernel(int samples, const void* idata, void* odata)
{
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
const DETECTION* in_detections = static_cast<const DETECTION*>(idata);
BOX* out_bboxes = static_cast<BOX*>(odata);
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
out_bboxes[blockOffset + cur_id].y1 = in_detections[blockOffset + cur_id].y1;
out_bboxes[blockOffset + cur_id].x1 = in_detections[blockOffset + cur_id].x1;
out_bboxes[blockOffset + cur_id].y2 = in_detections[blockOffset + cur_id].y2;
out_bboxes[blockOffset + cur_id].x2 = in_detections[blockOffset + cur_id].x2;
}
}
}
void specialSlice(hipStream_t stream, int batch_size, int boxes_cnt, const void* idata, void* odata)
{
int blocks = batch_size;
    int threads = dMIN(boxes_cnt, 1024); // blockDim.x is capped at 1024 threads
hipLaunchKernelGGL(( specialslice_kernel), dim3(blocks), dim3(threads), 0, stream, boxes_cnt, idata, odata);
}
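// specialSlice simply strips the trailing (class_id, score) fields: each 6-float
// DETECTION (y1, x1, y2, x2, class_id, score) becomes a 4-float BOX
// (y1, x1, y2, x2) at the same sample index.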
template <typename Dtype>
__global__ void concatenate(int featureCnt, int sampleCnt, const void* const* inScores, const void* const* inBBox,
void* outScore, void* outBBox)
{
int N = blockIdx.x;
int outBlockOffset = N * sampleCnt * featureCnt;
int inBlockOffset = N * sampleCnt;
int itemsPerThread = (sampleCnt + blockDim.x - 1) / blockDim.x;
Dtype* outScorePtr = static_cast<Dtype*>(outScore);
BBoxT<Dtype>* outBBoxPtr = static_cast<BBoxT<Dtype>*>(outBBox);
for (int fId = 0; fId < featureCnt; fId++)
{
const Dtype* fInScorePtr = static_cast<const Dtype*>(inScores[fId]);
const BBoxT<Dtype>* fInBBoxPtr = static_cast<const BBoxT<Dtype>*>(inBBox[fId]);
int featureOffset = fId * sampleCnt;
for (int i = 0; i < itemsPerThread; i++)
{
int curId = i * blockDim.x + threadIdx.x;
if (curId < sampleCnt)
{
outScorePtr[outBlockOffset + featureOffset + curId] = fInScorePtr[inBlockOffset + curId];
outBBoxPtr[outBlockOffset + featureOffset + curId] = fInBBoxPtr[inBlockOffset + curId];
}
}
}
}
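// Output layout produced by concatenate: for batch n the scores/boxes of all
// feature levels are stored back to back, e.g. with featureCnt = 5 and
// sampleCnt = 1000, out[n] = [level0: 1000 items | level1: 1000 items | ... |
// level4: 1000 items], giving 5000 candidates per image for the subsequent sort.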
template <typename Dtype>
__global__ void resampleBBox_kernel(int orig_size, int sample_size, const void* orig_bbox_ptr, void* sampled_bbox_ptr)
{
const BBoxT<Dtype>* in_bbox = static_cast<const BBoxT<Dtype>*>(orig_bbox_ptr);
BBoxT<Dtype>* out_bbox = static_cast<BBoxT<Dtype>*>(sampled_bbox_ptr);
int N = blockIdx.x;
int blockOffset_in = N * orig_size;
int blockOffset_out = N * sample_size;
int totalItems = (sample_size + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < sample_size)
{
out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id];
}
}
}
hipError_t ConcatTopK(hipStream_t stream, int N, int featureCnt, int topK, nvinfer1::DataType dtype, void* workspace,
const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals)
{
// Prepare Offset
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + spaceOffset.tempStorageOffset;
void* concatedScorePtr = wsPtr + spaceOffset.concatedScoreOffset;
void* concatedBBoxPtr = wsPtr + spaceOffset.concatedBBoxOffset;
void* sortedScorePtr = wsPtr + spaceOffset.sortedScoreOffset;
void* sortedBBoxPtr = wsPtr + spaceOffset.sortedBBoxOffset;
int blocks = N; // batch_size
    int threads = dMIN(topK, 1024); // blockDim.x is capped at 1024 threads
// Concat Scores and inBBox
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
hipLaunchKernelGGL(( concatenate<float>)
, dim3(blocks), dim3(threads), 0, stream, featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr);
CUASSERT(hipGetLastError());
break;
case nvinfer1::DataType::kHALF:
hipLaunchKernelGGL(( concatenate<__half>)
, dim3(blocks), dim3(threads), 0, stream, featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr);
CUASSERT(hipGetLastError());
break;
default: assert(false);
}
// Sort and sample topK
int itemCnt = topK * featureCnt;
int* offsets = static_cast<int*>(tempStoragePtr);
hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, itemCnt, N + 1, offsets);
assert(hipGetLastError() == hipSuccess);
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
score_bbox_cub_sort<float>(tempStoragePtr, concatedScorePtr, sortedScorePtr,
concatedBBoxPtr, sortedBBoxPtr, N * itemCnt, N,
offsets, stream);
break;
}
case nvinfer1::DataType::kHALF:
{
score_bbox_cub_sort<__half>(tempStoragePtr, concatedScorePtr, sortedScorePtr,
concatedBBoxPtr, sortedBBoxPtr, N * itemCnt, N,
offsets, stream);
break;
}
default: assert(false);
}
// Sample
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
hipLaunchKernelGGL(( resampleBBox_kernel<float>), dim3(N), dim3(dMIN(topK, 1024)), 0, stream, itemCnt, topK, sortedBBoxPtr, outProposals);
CUASSERT(hipGetLastError());
break;
case nvinfer1::DataType::kHALF:
hipLaunchKernelGGL(( resampleBBox_kernel<__half>), dim3(N), dim3(dMIN(topK, 1024)), 0, stream, itemCnt, topK, sortedBBoxPtr, outProposals);
CUASSERT(hipGetLastError());
break;
default: assert(false);
}
assert(hipGetLastError() == hipSuccess);
return hipGetLastError();
}
| aca364ba207fa14d72e9fb4abd1210f8c1948afc.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include "maskRCNNKernels.h"
#include "plugin.h"
#include <NvInfer.h>
#include <assert.h>
#include <cub/cub.cuh>
#include <iostream>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#define DUBUG_KERNEL 0
#define DUBUG_BATCH 0
#define DEBUG_T 1
#define dMIN(a, b) ((a) < (b) ? (a) : (b))
#define dMAX(a, b) ((a) > (b) ? (a) : (b))
#define dCLAMP(x, xMin, xMax) ((x) > (xMin) ? ((x) < (xMax) ? (x) : (xMax)) : (xMin))
template <typename BoxType>
struct BBoxT
{
BoxType y1, x1, y2, x2;
};
inline __device__ __half mul_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a * b;
#else
return __float2half(__half2float(a) * __half2float(b));
#endif
}
inline __device__ __half add_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a + b;
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
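// The __CUDA_ARCH__ >= 530 guards above exist because native __half arithmetic
// operators are only available on sm_53 and newer; on older devices the helpers
// round-trip through float, which is slower but gives the same result for a
// single multiply or add.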
template <typename DType>
__global__ void argMaxReset_kernel(
int samples, int NClass, const DType* in_scores, const int* maxIdx, DType* out_scores)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int max_idx = samples * NClass;
if (idx >= max_idx)
return;
int sampleIdx = idx / NClass;
int classIdx = idx % NClass;
if (classIdx != maxIdx[sampleIdx])
out_scores[idx] = 0;
else
out_scores[idx] = in_scores[idx];
}
template <typename DType>
struct ScanItem
{
DType data;
int idx;
};
template <typename DType>
struct GreaterItem
{
__host__ __device__ __forceinline__ ScanItem<DType> operator()(
const ScanItem<DType>& a, const ScanItem<DType>& b) const
{
return (a.data > b.data ? a : b);
}
};
template <typename DType>
__global__ void resetMemValue_kernel(void* outPtr, int samples, float val)
{
DType* out = static_cast<DType*>(outPtr);
int loop = gridDim.x * blockDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop)
{
out[idx] = (DType) val;
}
}
template <>
__global__ void resetMemValue_kernel<half>(void* outPtr, int samples, float val)
{
__half* out = static_cast<__half*>(outPtr);
int loop = gridDim.x * blockDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop)
{
out[idx] = __float2half(val);
}
}
// blockDim.x : Threads (loops over NClass)
// gridDim.x : sample count
// gridDim.y : batch N
// outScore : DType[ N * sample * 1 ]
// outLabel : int[ N * sample * 1 ]
// outBbox : int[ N * sample * 4 ]
template <typename DType, typename BoxType, int Threads = 32>
__global__ void argMaxGroup_kernel(int samples, int start_class_id, int NClass, const void* inScorePtr,
const void* inBboxPtr, const void* validSampleCountPtr, void* outScorePtr, void* outLabelPtr, void* outBboxPtr)
{
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inBbox = static_cast<const BoxType*>(inBboxPtr);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
BoxType* outLabel = static_cast<BoxType*>(outLabelPtr);
BoxType* outBbox = static_cast<BoxType*>(outBboxPtr);
const int N = blockIdx.y;
const int validSamples = validSampleCount[N];
typedef ScanItem<DType> ScanItemD;
typedef cub::BlockReduce<ScanItemD, Threads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int iSample = blockIdx.x; iSample < validSamples; iSample += gridDim.x)
{
int classOffset = (N * samples + iSample) * NClass; // start from [batch, count, class0]
// total IPerThread * blockDim
ScanItemD maxItem = {0.0f, -1};
for (int i = start_class_id; i < NClass; i += Threads)
{
int curIdx = i + threadIdx.x;
ScanItemD item = {0.0f, -1};
if (curIdx < NClass)
{
item.data = inScore[classOffset + curIdx];
item.idx = curIdx;
}
const int validNum = (NClass - i > Threads ? Threads : NClass - i);
ScanItemD aggregate = BlockReduce(temp_storage).Reduce(item, GreaterItem<DType>(), validNum);
__syncthreads();
if (aggregate.data > maxItem.data)
{
maxItem = aggregate;
}
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0 && iSample < 15 /*&& maxItem.idx >= 32*/)
{
printf("argMaxGroup N:%d, iSample:%d, maxItem(score:%.3f, idx:%d)validReduceNum:%d\n", N, iSample,
(float) maxItem.data, maxItem.idx, validNum);
}
#endif
}
const int dstOffset = N * samples + iSample;
if (threadIdx.x == 0)
{
outScore[dstOffset] = maxItem.data;
outLabel[dstOffset] = (BoxType) maxItem.idx;
outBbox[dstOffset * 4] = inBbox[(classOffset + maxItem.idx) * 4];
outBbox[dstOffset * 4 + 1] = inBbox[(classOffset + maxItem.idx) * 4 + 1];
outBbox[dstOffset * 4 + 2] = inBbox[(classOffset + maxItem.idx) * 4 + 2];
outBbox[dstOffset * 4 + 3] = inBbox[(classOffset + maxItem.idx) * 4 + 3];
}
}
}
struct BlockClassSumPrefix
{
int total;
// Constructor
__device__ BlockClassSumPrefix()
: total(0)
{
}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ int operator()(int aggregate)
{
int old = total;
total += aggregate;
return old;
}
};
#define LabelShift (2.5f)
#define MinValidScore (0.01f)
#define ScoreShift (1.0f)
template <typename DType>
__device__ __forceinline__ DType getKey(DType score, int lable, int NClass)
{
return (lable < 0 ? (DType) 0 : ((DType)(NClass - lable - 1) * LabelShift + score + ScoreShift));
}
template <typename DType, typename BoxType>
__device__ __forceinline__ void getScoreLable(DType key, int NClass, DType& score, BoxType& lable)
{
int i = key / LabelShift;
score = (key <= ScoreShift ? (DType) 0 : key - (DType) i * LabelShift - ScoreShift);
score = dCLAMP(score, (DType) 0, (DType) 1.0);
lable = (BoxType)(key <= ScoreShift ? -1 : (NClass - i - 1));
}
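// Key encoding example: getKey packs (label, score) into one sortable value so a
// single descending radix sort orders samples by class first, then by score.
// With NClass = 4, label = 1, score = 0.7:
//   key = (4 - 1 - 1) * 2.5 + 0.7 + 1.0 = 6.7
// and getScoreLable recovers i = 2, score = 6.7 - 5.0 - 1.0 = 0.7, label = 4 - 2 - 1 = 1.
// Scores live in [0, 1], so the per-class key ranges never overlap.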
// blockDim.x : threads
// gridDim.x : batch N
// validSampleCount INPUT : int [N]
// classStartPos OUTPUT: int [N * (NClass + 1)], must be memset to zero before this kernel
// outScore OUTPUT : DType [N * samples]
// outLabel OUTPUT : int [N * samples]
// outSampleIdx OUTPUT : int [N * samples]
// outValidSampleCount : int [N]
// IPerThread * Threads >= sample-count
#define MaxClassNum 255
template <typename DType, typename BoxType, int Threads = 256, int IPerThread = 4>
__global__ void sortPerClass_kernel(
// int N,
int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr,
void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr)
{
typedef cub::BlockExchange<DType, Threads, IPerThread> BlockExchangeKey;
typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI;
typedef cub::BlockRadixSort<DType, Threads, IPerThread, int> BlockRadixSort;
typedef cub::BlockScan<int, Threads> BlockScanClass;
__shared__ union
{
typename BlockExchangeKey::TempStorage storageKey;
typename BlockExchangeI::TempStorage storageI;
typename BlockRadixSort::TempStorage storageSort;
typename BlockScanClass::TempStorage storageScan;
} temp_storage;
__shared__ int smemClassCount[MaxClassNum];
assert(NClass < MaxClassNum);
assert(IPerThread * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr);
int* classStartPos = static_cast<int*>(classStartPosPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
BoxType* outLabel = static_cast<BoxType*>(outLabelPtr);
int* outSampleIdx = static_cast<int*>(outSampleIdxPtr);
int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr);
for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x)
{
smemClassCount[s] = 0;
}
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
DType key[IPerThread];
int iSample[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
iSample[i] = -1;
key[i] = -1.0f;
int curIdx = i * Threads + threadIdx.x;
if (curIdx < validSamples)
{
int label = (int) (inLabel[blockOffset + curIdx]);
DType score = inScore[blockOffset + curIdx];
if (label != background && label != -1 && score >= scoreThreshold)
{
key[i] = getKey(score, label, NClass);
iSample[i] = curIdx;
}
}
}
BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key);
__syncthreads();
BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample);
__syncthreads();
BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample);
__syncthreads();
// store Idx
cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples);
BoxType lable[IPerThread];
DType score[IPerThread];
#pragma unroll
for (int i = 0; i < IPerThread; ++i)
{
getScoreLable(key[i], NClass, score[i], lable[i]);
}
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples);
cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples);
// final
for (int i = 0; i < IPerThread; ++i)
{
if (lable[i] >= (BoxType) 0)
{
atomicAdd(&smemClassCount[(int) lable[i]], 1);
}
}
__syncthreads();
int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClassCount[k] > 0)
printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]);
}
}
__syncthreads();
#endif
BlockClassSumPrefix sumPrefix;
for (int s = 0; s < NClass; s += blockDim.x)
{ // s start from block
int iClassSamples = 0;
int iClass = s + threadIdx.x;
if (iClass < NClass)
{
iClassSamples = smemClassCount[iClass];
}
BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix);
__syncthreads();
if (iClass < NClass)
{
classStartPos[classBlockOffset + iClass] = iClassSamples;
}
}
if (threadIdx.x == 0)
{
classStartPos[classBlockOffset + NClass] = sumPrefix.total;
assert(sumPrefix.total <= validSamples); // background data removed.
outValidSampleCount[N] = sumPrefix.total;
#if DUBUG_KERNEL
if (N == DUBUG_BATCH)
printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total);
#endif
}
}
template <int Threads = 256, int IPerThread = 4>
__global__ void sortPerClass_kernel_half(
// int N,
int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr,
void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr)
{
typedef cub::BlockExchange<float, Threads, IPerThread> BlockExchangeKey;
typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI;
typedef cub::BlockRadixSort<float, Threads, IPerThread, int> BlockRadixSort;
typedef cub::BlockScan<int, Threads> BlockScanClass;
__shared__ union
{
typename BlockExchangeKey::TempStorage storageKey;
typename BlockExchangeI::TempStorage storageI;
typename BlockRadixSort::TempStorage storageSort;
typename BlockScanClass::TempStorage storageScan;
} temp_storage;
__shared__ int smemClassCount[MaxClassNum];
assert(NClass < MaxClassNum);
assert(IPerThread * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const __half* inScore = static_cast<const __half*>(inScorePtr);
const __half* inLabel = static_cast<const __half*>(inLabelPtr);
int* classStartPos = static_cast<int*>(classStartPosPtr);
__half* outScore = static_cast<__half*>(outScorePtr);
__half* outLabel = static_cast<__half*>(outLabelPtr);
int* outSampleIdx = static_cast<int*>(outSampleIdxPtr);
int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr);
for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x)
{
smemClassCount[s] = 0;
}
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
float key[IPerThread];
int iSample[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
iSample[i] = -1;
key[i] = -1.0f;
int curIdx = i * Threads + threadIdx.x;
if (curIdx < validSamples)
{
int label = __half2int_rd(inLabel[blockOffset + curIdx]);
float score = __half2float(inScore[blockOffset + curIdx]);
if (label != background && label != -1 && score >= scoreThreshold)
{
key[i] = getKey<float>(score, label, NClass);
iSample[i] = curIdx;
}
}
}
BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key);
__syncthreads();
BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample);
__syncthreads();
BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample);
__syncthreads();
// store Idx
cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples);
__half lable[IPerThread];
__half score[IPerThread];
for (int i = 0; i < IPerThread; ++i)
{
float label_float;
float score_float;
getScoreLable<float>(key[i], NClass, score_float, label_float);
lable[i] = __float2half(label_float);
score[i] = __float2half(score_float);
}
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples);
cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples);
// final
for (int i = 0; i < IPerThread; ++i)
{
if (__half2float(lable[i]) >= 0)
{
atomicAdd(&smemClassCount[__half2int_rd(lable[i])], 1);
}
}
__syncthreads();
int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum
#if DUBUG_KERNEL
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClassCount[k] > 0)
printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]);
}
}
__syncthreads();
#endif
BlockClassSumPrefix sumPrefix;
for (int s = 0; s < NClass; s += blockDim.x)
{ // s start from block
int iClassSamples = 0;
int iClass = s + threadIdx.x;
if (iClass < NClass)
{
iClassSamples = smemClassCount[iClass];
}
BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix);
__syncthreads();
if (iClass < NClass)
{
classStartPos[classBlockOffset + iClass] = iClassSamples;
}
}
if (threadIdx.x == 0)
{
classStartPos[classBlockOffset + NClass] = sumPrefix.total;
assert(sumPrefix.total <= validSamples); // background data removed.
outValidSampleCount[N] = sumPrefix.total;
#if DUBUG_KERNEL
if (N == DUBUG_BATCH)
printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total);
#endif
}
}
template <typename DType>
__device__ __forceinline__ BBoxT<DType> readBbox(const BBoxT<DType>* inBbox, int idx)
{
BBoxT<DType> ret = ((BBoxT<DType>*) (inBbox))[idx];
return ret;
}
template <typename DType>
__device__ __forceinline__ DType boxIoU(const BBoxT<DType>& a, const BBoxT<DType>& b)
{
BBoxT<DType> overlap = {
dMAX(a.y1, b.y1), dMAX(a.x1, b.x1), dMIN(a.y2, b.y2), dMIN(a.x2, b.x2),
};
DType oW = overlap.x2 - overlap.x1;
DType oH = overlap.y2 - overlap.y1;
if (oW < (DType) 0 || oH < (DType) 0)
return (DType) 0;
DType oA = oW * oH;
return (oA / ((a.y2 - a.y1) * (a.x2 - a.x1) + (b.y2 - b.y1) * (b.x2 - b.x1) - oA));
}
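// IoU example for the formula above: a = (0,0,2,2) and b = (1,1,3,3) overlap in
// (1,1,2,2), so oA = 1 and IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143. Note this
// version uses plain width/height differences (no +1 pixel convention).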
// PerClassNMS
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : = divUp(samples, Threads)
// outFlagSamples OUT: int [N * samples]
template <typename DType, typename BoxType, int Threads = 256, int ItemsPerThreads = 4>
__global__ void PerClassNMS_kernel(
// int N,
int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr,
// const void *inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr,
void* outFlagSamplesPtr)
{
typedef BBoxT<BoxType> BBox;
__shared__ struct
{
BBox refBox[MaxClassNum];
int endIdx[MaxClassNum];
int refIdx[MaxClassNum + 1];
bool markSamples[Threads * ItemsPerThreads];
int done;
} smemClasses;
assert(NClass + 1 < MaxClassNum);
assert(samples <= Threads * ItemsPerThreads);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
// const DType *inScore = static_cast<const DType *>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* classStarts = static_cast<const int*>(classStartsPtr);
int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
if (threadIdx.x == 0)
{
smemClasses.done = 0;
}
BBox curBox[ItemsPerThreads];
int label[ItemsPerThreads];
#pragma unroll
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples)
{
label[ite] = (int) inLabel[blockOffset + curIdx];
curBox[ite] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]);
}
else
{
label[ite] = -1;
}
smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true);
}
int classBlockOffset = N * (NClass + 1);
for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x)
{
int refIdx = classStarts[classBlockOffset + i];
smemClasses.refIdx[i] = refIdx;
smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
__syncthreads();
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int endIdx = smemClasses.refIdx[i + 1];
smemClasses.endIdx[i] = endIdx;
if (endIdx == smemClasses.refIdx[i])
{
atomicAdd(&smemClasses.done, 1);
}
}
__syncthreads();
#if DUBUG_KERNEL
// print info
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done);
printf("batch:%d, Total num:%d, startPos:\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClasses.refIdx[k] != smemClasses.endIdx[k])
{
printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k],
smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]);
}
}
printf("\n");
}
__syncthreads();
#endif
// class done to check stop point
while (smemClasses.done < NClass)
{
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
int refIdx = -1;
int endIdx = -1;
if (curIdx < validSamples && smemClasses.markSamples[curIdx])
{
if (label[ite] >= 0)
{
refIdx = smemClasses.refIdx[label[ite]];
endIdx = smemClasses.endIdx[label[ite]];
if (curIdx > refIdx && curIdx < endIdx)
{
BBox refBox = smemClasses.refBox[label[ite]];
if (boxIoU(refBox, curBox[ite]) > (DType) nmsThreshold)
{
smemClasses.markSamples[curIdx] = false;
}
}
}
}
}
__syncthreads();
// push refIdx/refBox forward to next mark
// only the refIdx thread to push itself. other threads idle
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int refIdx = smemClasses.refIdx[i];
int endIdx = smemClasses.endIdx[i];
if (refIdx < endIdx)
{
do
{
++refIdx;
} while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false);
smemClasses.refIdx[i] = refIdx;
if (refIdx < endIdx)
{
smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
else
{
atomicAdd(&smemClasses.done, 1);
}
}
}
__syncthreads();
}
// no need to write all data out
for (int segment = 0; segment < validSamples; segment += blockDim.x)
{
int curIdx = segment + threadIdx.x;
if (curIdx < validSamples)
{
outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 1 : 0);
}
}
}
template <int Threads = 256, int ItemsPerThreads = 4>
__global__ void PerClassNMS_half_kernel(
// int N,
int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr,
// const void *inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr,
void* outFlagSamplesPtr)
{
typedef BBoxT<__half> BBox;
__shared__ struct
{
BBox refBox[MaxClassNum];
int endIdx[MaxClassNum];
int refIdx[MaxClassNum + 1];
bool markSamples[Threads * ItemsPerThreads];
int done;
} smemClasses;
assert(NClass + 1 < MaxClassNum);
assert(samples <= Threads * ItemsPerThreads);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
// const DType *inScore = static_cast<const DType *>(inScorePtr);
const __half* inLabel = static_cast<const __half*>(inLabelPtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* classStarts = static_cast<const int*>(classStartsPtr);
int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
if (threadIdx.x == 0)
{
smemClasses.done = 0;
}
BBox curBox[ItemsPerThreads];
int label[ItemsPerThreads];
#pragma unroll
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples)
{
label[ite] = __half2int_rd(inLabel[blockOffset + curIdx]);
curBox[ite] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]);
}
else
{
label[ite] = -1;
}
smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true);
}
int classBlockOffset = N * (NClass + 1);
for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x)
{
int refIdx = classStarts[classBlockOffset + i];
smemClasses.refIdx[i] = refIdx;
smemClasses.refBox[i] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
__syncthreads();
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int endIdx = smemClasses.refIdx[i + 1];
smemClasses.endIdx[i] = endIdx;
if (endIdx == smemClasses.refIdx[i])
{
atomicAdd(&smemClasses.done, 1);
}
}
__syncthreads();
#if DUBUG_KERNEL
// print info
if (N == DUBUG_BATCH && threadIdx.x == 0)
{
printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done);
printf("batch:%d, Total num:%d, startPos:\n", N, validSamples);
for (int k = 0; k < NClass; ++k)
{
if (smemClasses.refIdx[k] != smemClasses.endIdx[k])
{
printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k],
smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]);
}
}
printf("\n");
}
__syncthreads();
#endif
    // iterate until every class has exhausted its candidates (smemClasses.done reaches NClass)
while (smemClasses.done < NClass)
{
for (int ite = 0; ite * blockDim.x < validSamples; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
int refIdx = -1;
int endIdx = -1;
if (curIdx < validSamples && smemClasses.markSamples[curIdx])
{
if (label[ite] >= 0)
{
refIdx = smemClasses.refIdx[label[ite]];
endIdx = smemClasses.endIdx[label[ite]];
if (curIdx > refIdx && curIdx < endIdx)
{
BBox refBox_half = smemClasses.refBox[label[ite]];
BBox curBox_half = curBox[ite];
BBoxT<float> refBox;
BBoxT<float> curBox_float;
refBox.y1 = __half2float(refBox_half.y1);
refBox.x1 = __half2float(refBox_half.x1);
refBox.y2 = __half2float(refBox_half.y2);
refBox.x2 = __half2float(refBox_half.x2);
curBox_float.y1 = __half2float(curBox_half.y1);
curBox_float.x1 = __half2float(curBox_half.x1);
curBox_float.y2 = __half2float(curBox_half.y2);
curBox_float.x2 = __half2float(curBox_half.x2);
if (boxIoU<float>(refBox, curBox_float) > nmsThreshold)
{
smemClasses.markSamples[curIdx] = false;
}
}
}
}
}
__syncthreads();
        // advance refIdx/refBox to the next surviving (still marked) sample
        // only the thread owning class i advances its refIdx; the other threads idle
for (int i = threadIdx.x; i < NClass; i += blockDim.x)
{
int refIdx = smemClasses.refIdx[i];
int endIdx = smemClasses.endIdx[i];
if (refIdx < endIdx)
{
do
{
++refIdx;
} while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false);
smemClasses.refIdx[i] = refIdx;
if (refIdx < endIdx)
{
smemClasses.refBox[i] = readBbox<__half>(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]);
}
else
{
atomicAdd(&smemClasses.done, 1);
}
}
}
__syncthreads();
}
    // only the per-sample keep/suppress flags need to be written out
for (int segment = 0; segment < validSamples; segment += blockDim.x)
{
int curIdx = segment + threadIdx.x;
if (curIdx < validSamples)
{
outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 1 : 0);
}
}
}
// TopKGather
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : divUp(samples, Threads)
// outDetectionCount : int [N], must be set to 0 before the kernel launch
#define MaxItemsPerThreads 8
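// Gathers the keepTopK highest-scoring boxes that survived NMS for each image. Scores of the
// surviving samples are sorted in registers with cub::BlockRadixSort; the shared-memory union
// below provides temp storage for 1..8 items per thread, selected at runtime from
// ceil(validSamples / Threads). This proposal variant writes only the boxes.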
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGatherProposal_kernel(int samples, int keepTopK, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr,
const void* inFlagSamplesPtr, void* outBboxPtr)
{
typedef BBoxT<BoxType> BBox;
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
__shared__ union
{
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
BBox* outBbox = static_cast<BBox*>(outBboxPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
if (curI < finalTopK && idx[i] >= 0 && float(score[i]) > MinValidScore)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
}
((BBox*) outBbox)[outBlockOffset + curI] = oB;
}
}
}
#define MaxItemsPerThreads 8
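// Same top-K gather as above, but for final detections: each kept entry is written to
// outDetections as 6 values [y1, x1, y2, x2, label, score]; entries beyond finalTopK or below
// MinValidScore are padded with zero boxes/scores and label -1.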
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGather_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr,
const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr,
void* outDetectionPtr)
{
typedef BBoxT<BoxType> BBox;
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
__shared__ union
{
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); // InLabel keeps INT32
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
DType* outDetections = static_cast<DType*>(outDetectionPtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
DType oS = 0.0f;
BoxType oL = -1;
if (curI < finalTopK && idx[i] >= 0 && float(score[i]) > MinValidScore)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
oS = score[i];
oL = (BoxType) inLabel[blockOffset + idx[i]];
}
outDetections[(outBlockOffset + curI) * 6] = oB.y1;
outDetections[(outBlockOffset + curI) * 6 + 1] = oB.x1;
outDetections[(outBlockOffset + curI) * 6 + 2] = oB.y2;
outDetections[(outBlockOffset + curI) * 6 + 3] = oB.x2;
outDetections[(outBlockOffset + curI) * 6 + 4] = oL;
outDetections[(outBlockOffset + curI) * 6 + 5] = oS;
}
}
}
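// The workspace structs below lay out every intermediate buffer inside one device allocation:
// each *Offset member is a byte offset from the workspace base, buffers are padded via
// AlignMem, and totalSize is the number of bytes the caller must provide.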
RefineDetectionWorkSpace::RefineDetectionWorkSpace(
const int batchSize, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType)
: argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount + 1, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
// resource
    // argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
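// Proposal workspace additionally holds the pre-refine buffers (extracted foreground scores,
// sorted scores, sorted boxes) and reserves (1 << 23) bytes of cub temp storage per image for
// the segmented radix sort.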
ProposalWorkSpace::ProposalWorkSpace(const int batchSize, const int inputCnt, const int sampleCount,
const RefineNMSParameters& param, const nvinfer1::DataType inType)
: preRefineScoreDims(inputCnt, 1)
, preRefineSortedScoreDims(inputCnt, 1)
, preRefineBboxDims(inputCnt, 4)
, argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
// preRefineScore : [N, inputcnt, 1] // extracted foreground score from inputs[0]
preRefineScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineScoreDims) * typeSize(type) * batchSize);
// preRefineSortedScore: [N, inputcnt, 1]
preRefineSortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize);
// preRefineBbox: [N, inputcnt, 4] // sorted bbox
preRefineBboxOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize);
    // argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
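// Same layout as ProposalWorkSpace without the unsorted pre-refine score buffer; the score/box
// buffers are sized with the runtime data type (inType), so both float and half are supported.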
MultilevelProposeROIWorkSpace::MultilevelProposeROIWorkSpace(const int batchSize, const int inputCnt,
const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType)
: preRefineSortedScoreDims(inputCnt, 1)
, preRefineBboxDims(inputCnt, 4)
, argMaxScoreDims(sampleCount, 1)
, argMaxBboxDims(sampleCount, 4)
, argMaxLabelDims(sampleCount, 1)
, sortClassScoreDims(sampleCount, 1)
, sortClassLabelDims(sampleCount, 1)
, sortClassSampleIdxDims(sampleCount + 1, 1)
, sortClassPosDims(param.numClasses + 1, 1)
, sortNMSMarkDims(sampleCount, 1)
{
size_t sumSize = 0;
const nvinfer1::DataType type = inType;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
// preRefineSortedScore: [N, inputcnt, 1]
preRefineSortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize);
// preRefineBbox: [N, inputcnt, 4] // sorted bbox
preRefineBboxOffset = sumSize;
sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize);
// argMaxScore : [N, samples] : m_Type
argMaxScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize);
argMaxBboxOffset = sumSize;
// argMaxBbox : [N, samples, 4] : m_Type
sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize);
argMaxLabelOffset = sumSize;
// argMaxLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(type) * batchSize);
sortClassScoreOffset = sumSize;
// sortClassScore : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize);
sortClassLabelOffset = sumSize;
// sortClassLabel : [N, samples] : m_Type
sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize);
sortClassSampleIdxOffset = sumSize;
// sortClassSampleIdx : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassValidCountOffset = sumSize;
// sortClassValidCount : [N, 1] : kINT32
sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortClassPosOffset = sumSize;
// sortClassPos : [N, numClasses+1] : kINT32
sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
sortNMSMarkOffset = sumSize;
// sortNMSMark : [N, samples] : kINT32
sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize);
totalSize = sumSize;
}
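// Workspace for concatenating concatCnt sets of topK proposals ([N, concatCnt * topK]) and
// re-sorting them by score, again with (1 << 23) bytes of cub temp storage per image.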
ConcatTopKWorkSpace::ConcatTopKWorkSpace(
const int batchSize, const int concatCnt, const int topK, const nvinfer1::DataType inType)
: concatedScoreDims(concatCnt * topK, 1)
, concatedBBoxDims(concatCnt * topK, 4)
, sortedScoreDims(concatCnt * topK, 1)
, sortedBBoxDims(concatCnt * topK, 4)
{
size_t sumSize = 0;
// const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT;
const nvinfer1::DataType type = inType;
// resource
// temp storage size for sorting scores
tempStorageOffset = sumSize;
sumSize += (1 << 23) * batchSize;
// concatedScoreOffset: [N, concatCnt*topK, 1]
concatedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(concatedScoreDims) * typeSize(type) * batchSize);
// concatedBBoxOffset: [N, concatCnt*topK, 4]
concatedBBoxOffset = sumSize;
sumSize += AlignMem(dimVolume(concatedBBoxDims) * typeSize(type) * batchSize);
// sortedScoreOffset: [N, concatCnt * topK, 1]
sortedScoreOffset = sumSize;
sumSize += AlignMem(dimVolume(sortedScoreDims) * typeSize(type) * batchSize);
// sortedBBoxOffset: [N, concatCnt * topK, 4]
sortedBBoxOffset = sumSize;
sumSize += AlignMem(dimVolume(sortedBBoxDims) * typeSize(type) * batchSize);
totalSize = sumSize;
}
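// Host-side argmax launchers: gridDim = {gridX, N}, where gridX blocks cooperate on the samples
// of one image (at most 512 / N, rounded down to a multiple of 32). Only the float path is
// implemented; kHALF currently falls through as a no-op.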
template <int Threads>
cudaError_t argMaxGroup(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox)
{
int gridX = nAlignDown(dMIN(samples, 512 / N), 32);
gridX = dMAX(gridX, 1);
dim3 gridDim = {static_cast<unsigned int>(gridX), static_cast<unsigned int>(N), 1};
dim3 threads = {Threads, 1, 1};
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
argMaxGroup_kernel<float, float, Threads><<<gridDim, threads, 0, stream>>>(
samples, 0, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox);
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return cudaGetLastError();
}
template <int Threads>
cudaError_t argMaxWOBackground(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox)
{
int gridX = nAlignDown(dMIN(samples, 512 / N), 32);
gridX = dMAX(gridX, 1);
dim3 gridDim = {static_cast<unsigned int>(gridX), static_cast<unsigned int>(N), 1};
dim3 threads = {Threads, 1, 1};
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
argMaxGroup_kernel<float, float, Threads><<<gridDim, threads, 0, stream>>>(
samples, 1, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox);
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return cudaGetLastError();
}
template <int Threads, int ItemsPerThreads>
cudaError_t sortPerClass(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, int background,
float scoreThreshold, const void* inSampleValidCount, const void* inScorePtr, const void* inLabelPtr,
const void* inBboxPtr, void* outclassStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr,
void* outValidSampleCountPtr)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
        sortPerClass_kernel<float, float, Threads, ItemsPerThreads><<<blocks, threads, 0, stream>>>(samples, NClass,
background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr,
outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr);
break;
case nvinfer1::DataType::kHALF:
        sortPerClass_kernel_half<Threads, ItemsPerThreads><<<blocks, threads, 0, stream>>>(samples, NClass,
background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr,
outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr);
break;
default: assert(false);
}
return cudaGetLastError();
}
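// Host-side per-class NMS dispatch: one block per image; the float path runs PerClassNMS_kernel
// and the half path runs PerClassNMS_half_kernel.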
template <int Threads>
cudaError_t PerClassNMS(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass,
const float nmsThreshold, const void* validSampleCount,
// const void *inScore,
const void* inLabel, const void* inBbox, const void* inBboxRefIdx, const void* classStarts, void* outFlagSamples)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
PerClassNMS_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, NClass, nmsThreshold,
validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples);
break;
case nvinfer1::DataType::kHALF:
PerClassNMS_half_kernel<Threads><<<blocks, threads, 0, stream>>>(samples, NClass, nmsThreshold,
validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples);
break;
default: assert(false);
}
return cudaGetLastError();
}
template <int Threads>
cudaError_t KeepTopKGather(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK,
const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr,
const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetections, int proposal)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
if (proposal)
{
TopKGatherProposal_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
else
{
TopKGather_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
return cudaGetLastError();
}
// TopKGather For TLT RPN Proposal
// gridDim.x : batch-N
// blockDim.x : Threads
// ItemsPerThreads : divUp(samples, Threads)
// outDetectionCount : int [N], must be set to 0 before the kernel launch
#define MaxItemsPerThreads 8
template <typename DType, typename BoxType, int Threads = 256>
__global__ void TopKGatherBoxScore_kernel(int samples, int keepTopK, const void* validSampleCountPtr,
const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr,
const void* inFlagSamplesPtr, void* outScorePtr, void* outBboxPtr)
{
typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1;
typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2;
typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3;
typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4;
typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5;
typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6;
typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7;
typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8;
    __shared__ union
    {
typename BlockRadixSort8::TempStorage sort8;
typename BlockRadixSort7::TempStorage sort7;
typename BlockRadixSort6::TempStorage sort6;
typename BlockRadixSort5::TempStorage sort5;
typename BlockRadixSort4::TempStorage sort4;
typename BlockRadixSort3::TempStorage sort3;
typename BlockRadixSort2::TempStorage sort2;
typename BlockRadixSort1::TempStorage sort1;
} temp_storage;
assert(MaxItemsPerThreads * Threads >= samples);
typedef BBoxT<BoxType> BBox;
const int* validSampleCount = static_cast<const int*>(validSampleCountPtr);
const DType* inScore = static_cast<const DType*>(inScorePtr);
const BBox* inBbox = static_cast<const BBox*>(inBboxPtr);
const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr);
const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr);
BBox* outBbox = static_cast<BBox*>(outBboxPtr);
DType* outScore = static_cast<DType*>(outScorePtr);
int N = blockIdx.x;
int blockOffset = N * samples;
int validSamples = validSampleCount[N];
int finalTopK = dMIN(keepTopK, validSamples);
int idx[MaxItemsPerThreads];
DType score[MaxItemsPerThreads];
int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x;
for (int ite = 0; ite < totalItems; ++ite)
{
int curIdx = ite * blockDim.x + threadIdx.x;
if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx])
{
idx[ite] = curIdx;
score[ite] = inScore[blockOffset + curIdx];
}
else
{
idx[ite] = -1;
score[ite] = 0.0f;
}
}
switch (totalItems)
{
case 0: break;
case 1:
BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx);
break;
case 2:
BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx);
break;
case 3:
BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx);
break;
case 4:
BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx);
break;
case 5:
BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx);
break;
case 6:
BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx);
break;
case 7:
BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx);
break;
case 8:
BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx);
break;
default: assert(false);
}
__syncthreads();
int outBlockOffset = N * keepTopK;
int topkItems = (keepTopK + (Threads - 1)) / Threads;
for (int i = 0; i < topkItems; ++i)
{
int curI = i * blockDim.x + threadIdx.x;
if (curI < keepTopK)
{
BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f};
DType oS = 0.0f;
if (curI < finalTopK && idx[i] >= 0)
{
oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]];
oS = score[i];
}
((BBox*) outBbox)[outBlockOffset + curI] = oB;
outScore[outBlockOffset + curI] = oS;
}
}
}
template <int Threads>
cudaError_t KeepTopKGatherBoxScore(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK,
const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr,
const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScores, void* outDetections, int proposal)
{
int blocks = N;
int threads = Threads;
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
if (proposal)
{
TopKGatherBoxScore_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores,
outDetections);
}
else
{
TopKGather_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
case nvinfer1::DataType::kHALF:
if (proposal)
{
TopKGatherBoxScore_kernel<__half, __half, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores,
outDetections);
}
else
{
TopKGather_kernel<__half, __half, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK,
validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr,
outDetections);
}
break;
default: assert(false);
}
return cudaGetLastError();
}
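// Refine-detection pipeline:
//   1. argMaxGroup: best class score / box delta per sample (skipped when NClass == 1)
//   2. ApplyDelta2Bboxes: decode the deltas against the input ROIs (normalized coordinates)
//   3. sortPerClass: group samples by class and sort each class by score
//   4. PerClassNMS: mark suppressed samples within each class
//   5. KeepTopKGather: write the keepTopK surviving detections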
cudaError_t RefineBatchClassNMS(cudaStream_t stream, int N, int samples, nvinfer1::DataType dtype,
const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace,
const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections)
{
int NClass = param.numClasses;
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset;
cudaError_t status = cudaSuccess;
CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
if (NClass > 1)
{ // multiple classes
status = argMaxGroup<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr,
argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes
assert(status == cudaSuccess);
CUASSERT(status);
}
else
{ // Only one class
argMaxScorePtr = const_cast<void*>(inScores);
argMaxBBoxPtr = const_cast<void*>(inDelta);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
status = ApplyDelta2Bboxes(stream, N, samples, inROI, argMaxBBoxPtr, argMaxBBoxPtr);
assert(status == cudaSuccess);
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return cudaErrorLaunchFailure;
}
assert(status == cudaSuccess);
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
assert(status == cudaSuccess);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0);
assert(status == cudaSuccess);
CUASSERT(status);
return status;
}
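// Variant of RefineBatchClassNMS: the argmax skips the background class (argMaxWOBackground)
// and boxes are decoded in absolute pixel coordinates by DecodeBBoxes using the per-coordinate
// regression weights and the input image size.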
cudaError_t DetectionPostProcess(cudaStream_t stream, int N, int samples, const float* regWeight,
const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param,
const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta,
const void* inCountValid, const void* inROI, void* outDetections)
{
int NClass = param.numClasses;
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset;
cudaError_t status = cudaSuccess;
CUASSERT(cudaMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream));
CUASSERT(cudaMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4 * sizeof(float), stream));
CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
CUASSERT(cudaMemsetAsync(sortClassPosPtr, 0, N * (NClass + 1) * sizeof(int), stream));
CUASSERT(cudaMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream));
if (NClass > 1)
{ // multiple classes
status = argMaxWOBackground<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid,
argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes
assert(status == cudaSuccess);
CUASSERT(status);
}
else
{ // Only one class
argMaxScorePtr = const_cast<void*>(inScores);
argMaxBBoxPtr = const_cast<void*>(inDelta);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
status = DecodeBBoxes(stream, N, samples, regWeight, inputHeight, inputWidth, inROI, argMaxBBoxPtr, argMaxBBoxPtr, dtype);
assert(status == cudaSuccess);
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return cudaErrorLaunchFailure;
}
assert(status == cudaSuccess);
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0);
CUASSERT(status);
return status;
}
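// (background, foreground) score pair layout consumed by extract_fg_kernel below, which keeps
// only the foreground score ([N, samples, 2] -> [N, samples, 1]).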
struct BF_SCORE
{
float bg, fg;
};
// in_scores : [N, samples, 2]
// output_score : [N, samples, 1]
__global__ void extract_fg_kernel(int samples, const void* in_scores, void* output_score)
{
const BF_SCORE* in = static_cast<const BF_SCORE*>(in_scores);
float* out = static_cast<float*>(output_score);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
out[blockOffset + cur_id] = in[blockOffset + cur_id].fg;
}
}
}
__global__ void set_offset_kernel(int stride, int size, int* output)
{
// One block, because batch size shouldn't be too large.
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
output[i] = i * stride;
}
}
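// Copies the first min(sample_size, orig_size) already-sorted scores/boxes of each image from
// the orig_size-long buffers into the sample_size-long buffers.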
template <typename Dtype>
__global__ void resample_kernel(int orig_size, int sample_size, const void* orig_score_ptr, const void* orig_bbox_ptr,
void* sampled_score_ptr, void* sampled_bbox_ptr)
{
const Dtype* in_score = static_cast<const Dtype*>(orig_score_ptr);
const BBoxT<Dtype>* in_bbox = static_cast<const BBoxT<Dtype>*>(orig_bbox_ptr);
Dtype* out_score = static_cast<Dtype*>(sampled_score_ptr);
BBoxT<Dtype>* out_bbox = static_cast<BBoxT<Dtype>*>(sampled_bbox_ptr);
int N = blockIdx.x;
int blockOffset_in = N * orig_size;
int blockOffset_out = N * sample_size;
int realSampleCnt = dMIN(sample_size, orig_size);
int totalItems = (realSampleCnt + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < realSampleCnt)
{
out_score[blockOffset_out + cur_id] = in_score[blockOffset_in + cur_id];
out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id];
}
}
}
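// RPN proposal path: extract foreground scores, decode deltas against the anchors, sort all
// inputCnt candidates of each image by score (cub segmented radix sort), resample the top
// `samples`, then run the sortPerClass / PerClassNMS / KeepTopKGather chain (proposal mode).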
cudaError_t proposalRefineBatchClassNMS(cudaStream_t stream, int N, int inputCnt, int samples, nvinfer1::DataType dtype,
const RefineNMSParameters& param, const ProposalWorkSpace& proposalOffset, void* workspace,
const void* inScores, //[N, inputcnt, 2]
const void* inDelta, //[N, inputcnt, 4]
const void* inCountValid,
const void* inAnchors, //[N, inputcnt, 4]
void* outProposals)
{
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset;
void* preRefineScorePtr = wsPtr + proposalOffset.preRefineScoreOffset;
void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset;
void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset;
void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset;
cudaError_t status = cudaSuccess;
CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
// extract foreground score
extract_fg_kernel<<<N, dMIN(inputCnt, 1024), 0, stream>>>(inputCnt, inScores, preRefineScorePtr);
CUASSERT(cudaGetLastError());
    // Here, inDelta is converted to normalized coordinates based on the anchors
status = ApplyDelta2Bboxes(stream, N, inputCnt, inAnchors, inDelta, const_cast<void*>(inDelta));
CUASSERT(status);
// sort the score
// d_key_in: preRefineScorePtr [N, inputCnt, 1]
// d_key_out: preRefineSortedScorePtr
// d_values_in: inDelta [N, inputCnt, 4]
// d_values_out: preRefineBboxPtr
// num_items: inputCnt*N
// num_segments: N
// offsets: [0, inputCnt, inputCnt*2, ..., ]
int* offsets = static_cast<int*>(tempStoragePtr);
set_offset_kernel<<<1, 1024, 0, stream>>>(inputCnt, N + 1, offsets);
assert(cudaGetLastError() == cudaSuccess);
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) preRefineScorePtr,
(float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N,
offsets, offsets + 1, 0, 8 * sizeof(float), stream);
assert((1 << 23) * (size_t) N > temp_storage_bytes);
cub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) preRefineScorePtr,
(float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N,
offsets, offsets + 1, 0, 8 * sizeof(float), stream);
int NClass = param.numClasses;
assert(NClass == 1);
if (NClass == 1)
{ // Only one class
resample_kernel<float><<<N, dMIN(samples, 1024), 0, stream>>>(
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0);
break;
}
        case nvinfer1::DataType::kHALF: break;
default: assert(false);
}
}
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return cudaErrorLaunchFailure;
}
CUASSERT(status);
status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(status);
status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outProposals, 1);
CUASSERT(status);
return status;
}
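// Two-phase cub::DeviceSegmentedRadixSort: the first call (NULL temp storage) only queries
// temp_storage_bytes, the second performs the descending (score, box) sort; tempStorage must be
// large enough (the workspaces reserve (1 << 23) bytes per image for this).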
template<typename Dtype>
void score_bbox_cub_sort(void* tempStorage,
const void* inScore,
void* sortedScore,
const void* inBBox,
void* sortedBBox,
int totalCnt,
int segCnt,
int* offsets,
cudaStream_t stream
)
{
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (Dtype*) inScore,
(Dtype*) sortedScore, (BBoxT<Dtype>*) inBBox, (BBoxT<Dtype>*) sortedBBox, totalCnt, segCnt,
offsets, offsets + 1, 0, 8 * sizeof(Dtype), stream);
CUASSERT(cudaGetLastError());
cub::DeviceSegmentedRadixSort::SortPairsDescending(tempStorage, temp_storage_bytes, (Dtype*) inScore,
(Dtype*) sortedScore, (BBoxT<Dtype>*) inBBox, (BBoxT<Dtype>*) sortedBBox, totalCnt, segCnt,
offsets, offsets + 1, 0, 8 * sizeof(Dtype), stream);
CUASSERT(cudaGetLastError());
}
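// Proposal generation for one score/delta/anchor set in either float or half: decode the deltas,
// sort candidates by score, resample the top `samples`, then sortPerClass / PerClassNMS /
// KeepTopKGatherBoxScore to emit both the kept scores and boxes.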
cudaError_t MultilevelPropose(cudaStream_t stream, int N, int inputCnt, int samples, const float* regWeight,
const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param,
const MultilevelProposeROIWorkSpace& proposalOffset, void* workspace,
const void* inScore, //[N, inputcnt, 1]
const void* inDelta, //[N, inputcnt, 4]
void* inCountValid,
const void* inAnchors, //[N, inputcnt, 4]
void* outScore, void* outBbox)
{
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset;
void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset;
void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset;
void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset;
void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset;
void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset;
void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset;
void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset;
void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset;
void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset;
void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset;
void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset;
cudaError_t status = cudaSuccess;
int NClass = param.numClasses;
assert(NClass == 1);
    // note: sizeof(dtype) would be the size of the DataType enum, not of the element type
    size_t elemSize = (dtype == nvinfer1::DataType::kHALF) ? sizeof(__half) : sizeof(float);
    CUASSERT(cudaMemsetAsync(argMaxScorePtr, 0, N * samples * elemSize, stream));
    CUASSERT(cudaMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4 * elemSize, stream));
CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream));
CUASSERT(cudaMemsetAsync(sortClassPosPtr, 0, N * (NClass + 1) * sizeof(int), stream));
CUASSERT(cudaMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream));
CUASSERT(cudaGetLastError());
    // Here, inDelta is converted to normalized coordinates based on the anchors
status = DecodeBBoxes(
stream, N, inputCnt, regWeight, inputHeight, inputWidth, inAnchors, inDelta, const_cast<void*>(inDelta), dtype);
CUASSERT(cudaGetLastError());
// sort the score
    // d_key_in: inScore [N, inputCnt, 1]
// d_key_out: preRefineSortedScorePtr
// d_values_in: inDelta [N, inputCnt, 4]
// d_values_out: preRefineBboxPtr
// num_items: inputCnt*N
// num_segments: N
// offsets: [0, inputCnt, inputCnt*2, ..., ]
int* offsets = static_cast<int*>(tempStoragePtr);
set_offset_kernel<<<1, 1024, 0, stream>>>(inputCnt, N + 1, offsets);
CUASSERT(cudaGetLastError());
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
score_bbox_cub_sort<float>(tempStoragePtr, inScore, preRefineSortedScorePtr,
inDelta, preRefineBboxPtr, N * inputCnt, N,
offsets, stream);
break;
}
case nvinfer1::DataType::kHALF:
{
score_bbox_cub_sort<__half>(tempStoragePtr, inScore, preRefineSortedScorePtr,
inDelta, preRefineBboxPtr, N * inputCnt, N,
offsets, stream);
break;
}
default: assert(false);
}
if (NClass == 1)
{ // Only one class
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
resample_kernel<float><<<N, dMIN(samples, 1024), 0, stream>>>(
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
CUASSERT(cudaGetLastError());
break;
}
case nvinfer1::DataType::kHALF:
{
resample_kernel<__half><<<N, dMIN(samples, 1024), 0, stream>>>(
inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr);
CUASSERT(cudaGetLastError());
break;
}
default: assert(false);
}
int threads = 512;
int blocks = (N * samples + threads - 1) / threads;
blocks = dMIN(blocks, 8);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0);
CUASSERT(cudaGetLastError());
break;
}
        case nvinfer1::DataType::kHALF:
        {
resetMemValue_kernel<__half><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0);
CUASSERT(cudaGetLastError());
break;
}
default: assert(false);
}
}
if (samples <= 1024)
{
status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 2048)
{
status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else if (samples <= 4096)
{
status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold,
inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr,
sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr);
}
else
{
assert(false && "unsupported sortPerClass");
return cudaErrorLaunchFailure;
}
CUASSERT(cudaGetLastError());
status = PerClassNMS<1024>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr,
// sortClassScorePtr,
sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr);
CUASSERT(cudaGetLastError());
status = KeepTopKGatherBoxScore<512>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr,
sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outScore, outBbox,
1);
CUASSERT(cudaGetLastError());
return status;
}
struct BBOX
{
float y1, x1, y2, x2;
};
struct DELTA
{
float dy, dx, logdh, logdw;
};
struct DELTA_HALF
{
__half dy, dx, logdh, logdw;
};
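// Box decoding used by the two kernels below (all math in float):
//   h_a = y2 - y1 + 1, w_a = x2 - x1 + 1, yc_a = y1 + 0.5 * h_a, xc_a = x1 + 0.5 * w_a
//   (dy, dx, log(dh), log(dw)) are divided by regWeight; the log terms are clamped to bboxClipThresh
//   yc = yc_a + dy * h_a, xc = xc_a + dx * w_a, h = exp(log(dh)) * h_a, w = exp(log(dw)) * w_a
//   y1 = yc - 0.5 * h, x1 = xc - 0.5 * w, y2 = y1 + h - 1, x2 = x1 + w - 1, clipped to the image bounds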
__global__ void decode_bboxes_kernel(int samples, const void* anchors, const void* delta, const float* regWeight,
const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh)
{
const BBOX* anchors_in = static_cast<const BBOX*>(anchors);
const DELTA* delta_in = static_cast<const DELTA*>(delta);
BBOX* bbox_out = static_cast<BBOX*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
            // convert yxyx -> (cy, cx, h, w)
            float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0); // h
            float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); // w
            float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5;        // cy
            float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5;        // cx
DELTA cur_delta = delta_in[blockOffset + cur_id];
// divided by regWeight
cur_delta.dy /= regWeight[0];
cur_delta.dx /= regWeight[1];
cur_delta.logdh /= regWeight[2];
cur_delta.logdw /= regWeight[3];
cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh);
cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh);
// apply delta
float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h;
float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w;
float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h;
float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w;
float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h;
float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w;
float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0;
float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0;
            // clip bbox: a more precise clipping method based on the real image window could be implemented
decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0);
decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0);
decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0);
decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = decoded_box_ymin;
bbox_out[blockOffset + cur_id].x1 = decoded_box_xmin;
bbox_out[blockOffset + cur_id].y2 = decoded_box_ymax;
bbox_out[blockOffset + cur_id].x2 = decoded_box_xmax;
}
}
}
__global__ void decode_bboxes_kernel_half(int samples, const void* anchors, const void* delta, const float* regWeight,
const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh)
{
const BBoxT<float>* anchors_in = static_cast<const BBoxT<float>*>(anchors);
const DELTA_HALF* delta_in = static_cast<const DELTA_HALF*>(delta);
BBoxT<__half>* bbox_out = static_cast<BBoxT<__half>*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBoxT<float> cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
            // convert yxyx -> (cy, cx, h, w)
            float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0); // h
            float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); // w
            float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5;        // cy
            float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5;        // cx
DELTA_HALF cur_delta_half = delta_in[blockOffset + cur_id];
DELTA cur_delta;
cur_delta.dy = __half2float(cur_delta_half.dy);
cur_delta.dx = __half2float(cur_delta_half.dx);
cur_delta.logdh = __half2float(cur_delta_half.logdh);
cur_delta.logdw = __half2float(cur_delta_half.logdw);
// divided by regWeight
cur_delta.dy /= regWeight[0];
cur_delta.dx /= regWeight[1];
cur_delta.logdh /= regWeight[2];
cur_delta.logdw /= regWeight[3];
cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh);
cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh);
// apply delta
float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h;
float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w;
float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h;
float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w;
float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h;
float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w;
float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0;
float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0;
            // clip bbox: a more precise clipping method based on the real image window could be implemented
decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0);
decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0);
decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0);
decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = __float2half(decoded_box_ymin);
bbox_out[blockOffset + cur_id].x1 = __float2half(decoded_box_xmin);
bbox_out[blockOffset + cur_id].y2 = __float2half(decoded_box_ymax);
bbox_out[blockOffset + cur_id].x2 = __float2half(decoded_box_xmax);
}
}
}
cudaError_t DecodeBBoxes(cudaStream_t stream, int N,
int samples, // number of anchors per image
const float* regWeight, const float inputHeight, const float inputWidth,
const void* anchors, // [N, anchors, (y1, x1, y2, x2)]
const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)])
void* outputBbox, //[N, anchors, (y1, x1, y2, x2)]
nvinfer1::DataType dtype
)
{
int blocks = N;
int threads = dMIN(samples, 1024);
    // divide delta by the regression weights (bbox_std)
    // apply delta steps:
    //  cy = anchor_cy + dy*anchor_h
    //  cx = anchor_cx + dx*anchor_w
    //  h = exp(log(dh))*anchor_h
    //  w = exp(log(dw))*anchor_w
    // clip the bbox in absolute (pixel) coordinates
float bboxClipThresh = log(1000.0f / 16.0f);
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
decode_bboxes_kernel<<<blocks, threads, 0, stream>>>(
samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh);
break;
}
case nvinfer1::DataType::kHALF:
{
decode_bboxes_kernel_half<<<blocks, threads, 0, stream>>>(
samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh);
break;
}
default: assert(false);
}
return cudaGetLastError();
}
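// Delta decoding in normalized coordinates used by ApplyDelta2Bboxes: deltas are scaled by the
// fixed std-devs (0.1, 0.1, 0.2, 0.2), applied to the anchors, and the result is clipped to [0, 1].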
__global__ void apply_delta_kernel(int samples, const void* anchors, const void* delta, void* outputBbox)
{
const BBOX* anchors_in = static_cast<const BBOX*>(anchors);
const DELTA* delta_in = static_cast<const DELTA*>(delta);
BBOX* bbox_out = static_cast<BBOX*>(outputBbox);
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id];
// convert yxyx -> cyxhw
// cy, cx, h, w
BBOX cur_anchor_cyxhw;
cur_anchor_cyxhw.y1 = (cur_anchor_yxyx.y1 + cur_anchor_yxyx.y2) / 2;
cur_anchor_cyxhw.x1 = (cur_anchor_yxyx.x1 + cur_anchor_yxyx.x2) / 2;
cur_anchor_cyxhw.y2 = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1);
cur_anchor_cyxhw.x2 = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1);
DELTA cur_delta = delta_in[blockOffset + cur_id];
// multiply std_dev
cur_delta.dy *= 0.1;
cur_delta.dx *= 0.1;
cur_delta.logdh *= 0.2;
cur_delta.logdw *= 0.2;
// apply delta
cur_anchor_cyxhw.y1 += cur_delta.dy * cur_anchor_cyxhw.y2;
cur_anchor_cyxhw.x1 += cur_delta.dx * cur_anchor_cyxhw.x2;
cur_anchor_cyxhw.y2 *= expf(cur_delta.logdh);
cur_anchor_cyxhw.x2 *= expf(cur_delta.logdw);
cur_anchor_yxyx.y1 = cur_anchor_cyxhw.y1 - 0.5 * cur_anchor_cyxhw.y2;
cur_anchor_yxyx.x1 = cur_anchor_cyxhw.x1 - 0.5 * cur_anchor_cyxhw.x2;
cur_anchor_yxyx.y2 = cur_anchor_yxyx.y1 + cur_anchor_cyxhw.y2;
cur_anchor_yxyx.x2 = cur_anchor_yxyx.x1 + cur_anchor_cyxhw.x2;
// clip bbox: a more precise clip method based on the real image window could be implemented
cur_anchor_yxyx.y1 = dMAX(dMIN(cur_anchor_yxyx.y1, 1.0), 0.0);
cur_anchor_yxyx.x1 = dMAX(dMIN(cur_anchor_yxyx.x1, 1.0), 0.0);
cur_anchor_yxyx.y2 = dMAX(dMIN(cur_anchor_yxyx.y2, 1.0), 0.0);
cur_anchor_yxyx.x2 = dMAX(dMIN(cur_anchor_yxyx.x2, 1.0), 0.0);
bbox_out[blockOffset + cur_id].y1 = cur_anchor_yxyx.y1;
bbox_out[blockOffset + cur_id].x1 = cur_anchor_yxyx.x1;
bbox_out[blockOffset + cur_id].y2 = cur_anchor_yxyx.y2;
bbox_out[blockOffset + cur_id].x2 = cur_anchor_yxyx.x2;
}
}
}
cudaError_t ApplyDelta2Bboxes(cudaStream_t stream, int N,
int samples, // number of anchors per image
const void* anchors, // [N, anchors, (y1, x1, y2, x2)]
const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)])
void* outputBbox //[N, anchors, (y1, x1, y2, x2)]
)
{
int blocks = N;
int threads = dMIN(samples, 1024);
// delta multiply bbox_std
// apply delta steps:
// cy = anchor_cy + dy*height
// cx = anchor_cx + dx*width
// h = exp(dh)*anchor_h
// w = exp(dw)*anchor_w
// clip the bbox
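// Hedged worked example (illustrative numbers): a normalized anchor with
// y1 = 0.2, y2 = 0.4 has cy = 0.3 and h = 0.2; a raw dy = 0.5 becomes
// 0.5 * 0.1 = 0.05 after the std-dev scaling, so cy' = 0.3 + 0.05 * 0.2 = 0.31,
// and a raw log(dh) = 1.0 becomes 0.2, so h' = exp(0.2) * 0.2 ~= 0.244;
// the decoded corners are then clipped to the [0, 1] window in the kernel above.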
apply_delta_kernel<<<blocks, threads, 0, stream>>>(samples, anchors, delta, outputBbox);
return cudaGetLastError();
}
template <typename Tfeat>
__device__ inline Tfeat interpolateBilinear(const Tfeat* src, xy_t srcDims, float y, float x)
{
const int y0 = static_cast<int>(y);
const float yAlpha = y - static_cast<float>(y0);
const int x0 = static_cast<int>(x);
const float xAlpha = x - static_cast<float>(x0);
assert(y0 < srcDims.y);
assert(x0 < srcDims.x);
const int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil
const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil
assert(y1 < srcDims.y);
assert(x1 < srcDims.x);
const Tfeat src00 = src[(y0) *srcDims.x + (x0)];
const Tfeat src01 = src[(y0) *srcDims.x + (x1)];
const Tfeat src10 = src[(y1) *srcDims.x + (x0)];
const Tfeat src11 = src[(y1) *srcDims.x + (x1)];
const Tfeat src0 = src00 * (1.0 - xAlpha) + src01 * xAlpha;
const Tfeat src1 = src10 * (1.0 - xAlpha) + src11 * xAlpha;
return src0 * (1.0 - yAlpha) + src1 * yAlpha;
}
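// Hedged worked example of the bilinear weighting above (illustrative values):
// for a 2x2 neighbourhood src00 = 1, src01 = 2, src10 = 3, src11 = 4 sampled at
// xAlpha = yAlpha = 0.5, the row interpolations give 1.5 and 3.5, and the final
// result is 0.5 * 1.5 + 0.5 * 3.5 = 2.5.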
template <>
__device__ inline __half interpolateBilinear(const __half* src, xy_t srcDims, float y, float x)
{
const int y0 = static_cast<int>(y);
const float yAlpha = y - static_cast<float>(y0);
const int x0 = static_cast<int>(x);
const float xAlpha = x - static_cast<float>(x0);
assert(y0 < srcDims.y);
assert(x0 < srcDims.x);
const int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil
const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil
assert(y1 < srcDims.y);
assert(x1 < srcDims.x);
const __half src00 = src[(y0) *srcDims.x + (x0)];
const __half src01 = src[(y0) *srcDims.x + (x1)];
const __half src10 = src[(y1) *srcDims.x + (x0)];
const __half src11 = src[(y1) *srcDims.x + (x1)];
const __half src0 = add_fb(mul_fb(src00, (1.0 - xAlpha)), mul_fb(src01, xAlpha));
const __half src1 = add_fb(mul_fb(src10, (1.0 - xAlpha)), mul_fb(src11, xAlpha));
return add_fb(mul_fb(src0, (1.0 - yAlpha)), mul_fb(src1, yAlpha));
}
template <typename Trois, typename Tfeat>
__global__ void roiAlign_kernel(int featureCount, int roiCount,
float threshold, const Trois* rois,
const Tfeat* P2, const xy_t P2dims, const Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims,
const Tfeat* P5, const xy_t P5dims,
Tfeat* pooled, const xy_t poolDims)
{
const int batch = blockIdx.x;
const int feature = blockIdx.y;
for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x)
{
const Trois* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = roi[0];
const float x1 = roi[1];
const float y2 = roi[2];
const float x2 = roi[3];
if (!(0 <= y1 && y1 <= 1 && 0 <= x1 && x1 <= 1 && 0 <= y2 && y2 <= 1 && 0 <= x2 && x2 <= 1 && y1 < y2
&& x1 < x2))
{
continue;
}
else
{
}
const float hw = (y2 - y1) * (x2 - x1);
const Tfeat* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
if (hw > threshold)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold *= 4;
if (hw > threshold)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold *= 4;
if (hw > threshold)
{
src = P5;
srcDims = P5dims;
++iP;
}
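// Hedged summary of the level selection above: with the incoming threshold t,
// ROIs with area hw <= t sample P2, t < hw <= 4t sample P3, 4t < hw <= 16t
// sample P4, and anything larger falls through to P5.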
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
Tfeat* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature);
const float yStart = y1 * (srcDims.y - 1);
const float xStart = x1 * (srcDims.x - 1);
const float yEnd = y2 * (srcDims.y - 1);
const float xEnd = x2 * (srcDims.x - 1);
const float yDelta = (yEnd - yStart) / (poolDims.y - 1);
const float xDelta = (xEnd - xStart) / (poolDims.x - 1);
for (int yy = 0; yy < poolDims.y; ++yy)
{
const float ySample = min(yStart + yDelta * yy, yEnd);
for (int xx = 0; xx < poolDims.x; ++xx)
{
const float xSample = min(xStart + xDelta * xx, xEnd);
float result = interpolateBilinear(src, srcDims, ySample, xSample);
*dst = result;
dst++;
}
}
}
}
cudaError_t roiAlign(cudaStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold,
const void* rois, const void* const layers[], const xy_t* layerDims,
void* pooled, const xy_t poolDims)
{
const dim3 blocks(batchSize, featureCount);
const int threads(256);
roiAlign_kernel<<<blocks, threads, 0, stream>>>(featureCount, roiCount, firstThreshold,
static_cast<const float*>(rois),
static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1],
static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3],
static_cast<float*>(pooled), poolDims);
return cudaGetLastError();
}
template <typename Trois, typename Tfeat>
__global__ void roiAlignHalfCenter_kernel(int featureCount, int roiCount,
float threshold, int inputHeight, int inputWidth, const void* rois_,
const void* const P2_, const xy_t P2dims, const void* const P3_, const xy_t P3dims, const void* const P4_, const xy_t P4dims,
const void* const P5_, const xy_t P5dims, const void* const P6_, const xy_t P6dims,
void* pooled_, const xy_t poolDims)
{
const Trois* rois = static_cast<const Trois*>(rois_);
const Tfeat* P2 = static_cast<const Tfeat*>(P2_);
const Tfeat* P3 = static_cast<const Tfeat*>(P3_);
const Tfeat* P4 = static_cast<const Tfeat*>(P4_);
const Tfeat* P5 = static_cast<const Tfeat*>(P5_);
const Tfeat* P6 = static_cast<const Tfeat*>(P6_);
Tfeat* pooled = static_cast<Tfeat* >(pooled_);
const int batch = blockIdx.x;
const int feature = blockIdx.y;
const int roiIdx = blockIdx.z;
const int total_item_cnt = poolDims.x * poolDims.y;
for (int itemIdx = threadIdx.x; itemIdx < total_item_cnt; itemIdx += blockDim.x)
{
const Trois* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = roi[0];
const float x1 = roi[1];
const float y2 = roi[2];
const float x2 = roi[3];
if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2
&& x2 <= inputWidth && y1 < y2 && x1 < x2))
{
continue;
}
else
{
}
const float hw = (y2 - y1) * (x2 - x1);
const Tfeat* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
float threshold_per_item = threshold;
if (hw > threshold_per_item)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P5;
srcDims = P5dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P6;
srcDims = P6dims;
++iP;
}
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
Tfeat* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature) + itemIdx;
float scale_to_level = 1.0f;
for (int i = 0; i < iP; i++)
{
scale_to_level *= 2.0f;
}
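// Hedged note: the loop above computes scale_to_level = 2^iP, i.e. the assumed
// feature stride of the selected level (P2 -> 4, P3 -> 8, P4 -> 16, P5 -> 32,
// P6 -> 64), which converts the ROI from input pixels into feature-map cells.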
const float yStart = y1 / scale_to_level;
const float xStart = x1 / scale_to_level;
const float yEnd = y2 / scale_to_level;
const float xEnd = x2 / scale_to_level;
const float yDelta = (yEnd - yStart) / (poolDims.y);
const float xDelta = (xEnd - xStart) / (poolDims.x);
const int yy = itemIdx / poolDims.y;
const int xx = itemIdx % poolDims.x;
const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f);
const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f);
Tfeat result = interpolateBilinear<Tfeat>(src, srcDims, ySample, xSample);
*dst = result;
}
}
template <>
__global__ void roiAlignHalfCenter_kernel<__half, __half>(int featureCount, int roiCount,
float threshold, int inputHeight, int inputWidth, const void* rois_,
const void* const P2_, const xy_t P2dims, const void* const P3_, const xy_t P3dims, const void* const P4_, const xy_t P4dims,
const void* const P5_, const xy_t P5dims, const void* const P6_, const xy_t P6dims,
void* pooled_, const xy_t poolDims)
{
const __half* rois = static_cast<const __half*>(rois_);
const __half* P2 = static_cast<const __half*>(P2_);
const __half* P3 = static_cast<const __half*>(P3_);
const __half* P4 = static_cast<const __half*>(P4_);
const __half* P5 = static_cast<const __half*>(P5_);
const __half* P6 = static_cast<const __half*>(P6_);
__half* pooled = static_cast<__half* >(pooled_);
const int batch = blockIdx.x;
const int feature = blockIdx.y;
const int roiIdx = blockIdx.z;
const int total_item_cnt = poolDims.x * poolDims.y;
for (int itemIdx = threadIdx.x; itemIdx < total_item_cnt; itemIdx += blockDim.x)
{
const __half* roi = rois + 4 * (batch * roiCount + roiIdx);
const float y1 = __half2float(roi[0]);
const float x1 = __half2float(roi[1]);
const float y2 = __half2float(roi[2]);
const float x2 = __half2float(roi[3]);
if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2
&& x2 <= inputWidth && y1 < y2 && x1 < x2))
{
continue;
}
else
{
}
const float hw = (y2 - y1) * (x2 - x1);
const __half* src = P2;
xy_t srcDims = P2dims;
int iP = 2;
float threshold_per_item = threshold;
if (hw > threshold_per_item)
{
src = P3;
srcDims = P3dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P4;
srcDims = P4dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P5;
srcDims = P5dims;
++iP;
}
threshold_per_item *= 4;
if (hw > threshold_per_item)
{
src = P6;
srcDims = P6dims;
++iP;
}
src += srcDims.x * srcDims.y * (batch * featureCount + feature);
__half* dst
= pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature) + itemIdx;
float scale_to_level = 1.0f;
for (int i = 0; i < iP; i++)
{
scale_to_level *= 2.0f;
}
const float yStart = y1 / scale_to_level;
const float xStart = x1 / scale_to_level;
const float yEnd = y2 / scale_to_level;
const float xEnd = x2 / scale_to_level;
const float yDelta = (yEnd - yStart) / (poolDims.y);
const float xDelta = (xEnd - xStart) / (poolDims.x);
const int yy = itemIdx / poolDims.y;
const int xx = itemIdx % poolDims.x;
const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f);
const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f);
__half result = interpolateBilinear<__half>(src, srcDims, ySample, xSample);
*dst = result;
}
}
cudaError_t roiAlignHalfCenter(cudaStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold,
int inputHeight, int inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims,
void* pooled, const xy_t poolDims, const DataType dtype)
{
const dim3 blocks(batchSize, featureCount, roiCount);
const int threads(64);
switch (dtype){
case nvinfer1::DataType::kFLOAT:
{
roiAlignHalfCenter_kernel<float, float><<<blocks, threads, 0, stream>>>(featureCount, roiCount, firstThreshold, inputHeight,
inputWidth, rois, layers[0], layerDims[0],
layers[1], layerDims[1], layers[2], layerDims[2],
layers[3], layerDims[3], layers[4], layerDims[4],
pooled, poolDims);
break;
}
case nvinfer1::DataType::kHALF:
{
roiAlignHalfCenter_kernel<__half, __half><<<blocks, threads, 0, stream>>>(featureCount, roiCount, firstThreshold, inputHeight,
inputWidth, rois, layers[0], layerDims[0],
layers[1], layerDims[1], layers[2], layerDims[2],
layers[3], layerDims[3], layers[4], layerDims[4],
pooled, poolDims);
break;
}
default: assert(false);
}
return cudaGetLastError();
}
__global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride,
int ibatchstride, float* odata, int ostride, int obatchstride)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = blockIdx.z;
for (int batch = z0; batch < nbatch; batch += gridDim.z)
{
for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y)
{
for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x)
{
int ix = int(ox / scale);
int iy = int(oy / scale);
odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix];
}
}
}
}
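// Hedged worked example of the index mapping above: with scale = 2.0, output
// pixels ox = 0, 1, 2, 3 read input columns ix = int(ox / 2) = 0, 0, 1, 1, so each
// input pixel is replicated twice along each axis (nearest-neighbour upsampling).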
void resizeNearest(dim3 grid, dim3 block, cudaStream_t stream, int nbatch, float scale, int2 osize, float const* idata,
int istride, int ibatchstride, float* odata, int ostride, int obatchstride)
{
resize_nearest_kernel_2d<<<grid, block, 0, stream>>>(
nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride);
}
struct BOX
{
float y1, x1, y2, x2;
};
struct DETECTION
{
float y1, x1, y2, x2, class_id, score;
};
__global__ void specialslice_kernel(int samples, const void* idata, void* odata)
{
int N = blockIdx.x;
int blockOffset = N * samples;
int totalItems = (samples + (blockDim.x - 1)) / blockDim.x;
const DETECTION* in_detections = static_cast<const DETECTION*>(idata);
BOX* out_bboxes = static_cast<BOX*>(odata);
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < samples)
{
out_bboxes[blockOffset + cur_id].y1 = in_detections[blockOffset + cur_id].y1;
out_bboxes[blockOffset + cur_id].x1 = in_detections[blockOffset + cur_id].x1;
out_bboxes[blockOffset + cur_id].y2 = in_detections[blockOffset + cur_id].y2;
out_bboxes[blockOffset + cur_id].x2 = in_detections[blockOffset + cur_id].x2;
}
}
}
void specialSlice(cudaStream_t stream, int batch_size, int boxes_cnt, const void* idata, void* odata)
{
int blocks = batch_size;
int threads = dMIN(boxes_cnt, 2048);
specialslice_kernel<<<blocks, threads, 0, stream>>>(boxes_cnt, idata, odata);
}
template <typename Dtype>
__global__ void concatenate(int featureCnt, int sampleCnt, const void* const* inScores, const void* const* inBBox,
void* outScore, void* outBBox)
{
int N = blockIdx.x;
int outBlockOffset = N * sampleCnt * featureCnt;
int inBlockOffset = N * sampleCnt;
int itemsPerThread = (sampleCnt + blockDim.x - 1) / blockDim.x;
Dtype* outScorePtr = static_cast<Dtype*>(outScore);
BBoxT<Dtype>* outBBoxPtr = static_cast<BBoxT<Dtype>*>(outBBox);
for (int fId = 0; fId < featureCnt; fId++)
{
const Dtype* fInScorePtr = static_cast<const Dtype*>(inScores[fId]);
const BBoxT<Dtype>* fInBBoxPtr = static_cast<const BBoxT<Dtype>*>(inBBox[fId]);
int featureOffset = fId * sampleCnt;
for (int i = 0; i < itemsPerThread; i++)
{
int curId = i * blockDim.x + threadIdx.x;
if (curId < sampleCnt)
{
outScorePtr[outBlockOffset + featureOffset + curId] = fInScorePtr[inBlockOffset + curId];
outBBoxPtr[outBlockOffset + featureOffset + curId] = fInBBoxPtr[inBlockOffset + curId];
}
}
}
}
template <typename Dtype>
__global__ void resampleBBox_kernel(int orig_size, int sample_size, const void* orig_bbox_ptr, void* sampled_bbox_ptr)
{
const BBoxT<Dtype>* in_bbox = static_cast<const BBoxT<Dtype>*>(orig_bbox_ptr);
BBoxT<Dtype>* out_bbox = static_cast<BBoxT<Dtype>*>(sampled_bbox_ptr);
int N = blockIdx.x;
int blockOffset_in = N * orig_size;
int blockOffset_out = N * sample_size;
int totalItems = (sample_size + (blockDim.x - 1)) / blockDim.x;
for (int i = 0; i < totalItems; i++)
{
int cur_id = i * blockDim.x + threadIdx.x;
if (cur_id < sample_size)
{
out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id];
}
}
}
cudaError_t ConcatTopK(cudaStream_t stream, int N, int featureCnt, int topK, nvinfer1::DataType dtype, void* workspace,
const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals)
{
// Prepare Offset
int8_t* wsPtr = static_cast<int8_t*>(workspace);
void* tempStoragePtr = wsPtr + spaceOffset.tempStorageOffset;
void* concatedScorePtr = wsPtr + spaceOffset.concatedScoreOffset;
void* concatedBBoxPtr = wsPtr + spaceOffset.concatedBBoxOffset;
void* sortedScorePtr = wsPtr + spaceOffset.sortedScoreOffset;
void* sortedBBoxPtr = wsPtr + spaceOffset.sortedBBoxOffset;
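// Hedged summary of the flow below: the per-level scores/boxes are first
// concatenated into the workspace, then sorted per batch image (set_offset_kernel
// builds the segment offsets and score_bbox_cub_sort performs what appears to be a
// CUB segmented sort), and finally resampleBBox_kernel copies the leading topK
// sorted boxes of each image into outProposals.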
int blocks = N; // batch_size
int threads = dMIN(topK, 2048);
// Concat Scores and inBBox
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
concatenate<float>
<<<blocks, threads, 0, stream>>>(featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr);
CUASSERT(cudaGetLastError());
break;
case nvinfer1::DataType::kHALF:
concatenate<__half>
<<<blocks, threads, 0, stream>>>(featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr);
CUASSERT(cudaGetLastError());
break;
default: assert(false);
}
// Sort and sample topK
int itemCnt = topK * featureCnt;
int* offsets = static_cast<int*>(tempStoragePtr);
set_offset_kernel<<<1, 1024, 0, stream>>>(itemCnt, N + 1, offsets);
assert(cudaGetLastError() == cudaSuccess);
tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1));
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
{
score_bbox_cub_sort<float>(tempStoragePtr, concatedScorePtr, sortedScorePtr,
concatedBBoxPtr, sortedBBoxPtr, N * itemCnt, N,
offsets, stream);
break;
}
case nvinfer1::DataType::kHALF:
{
score_bbox_cub_sort<__half>(tempStoragePtr, concatedScorePtr, sortedScorePtr,
concatedBBoxPtr, sortedBBoxPtr, N * itemCnt, N,
offsets, stream);
break;
}
default: assert(false);
}
// Sample
switch (dtype)
{
case nvinfer1::DataType::kFLOAT:
resampleBBox_kernel<float><<<N, dMIN(topK, 1024), 0, stream>>>(itemCnt, topK, sortedBBoxPtr, outProposals);
CUASSERT(cudaGetLastError());
break;
case nvinfer1::DataType::kHALF:
resampleBBox_kernel<__half><<<N, dMIN(topK, 1024), 0, stream>>>(itemCnt, topK, sortedBBoxPtr, outProposals);
CUASSERT(cudaGetLastError());
break;
default: assert(false);
}
assert(cudaGetLastError() == cudaSuccess);
return cudaGetLastError();
}
|
a524a5ef8f6c70b47ba150f300d07f676807429b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "../gl_helper.h"
#include "../cpu_bitmap.h"
#include "tools.hpp"
__global__ void add(int a, int b, int * c) {
*c = a + b;
}
__global__ void addArray(int * a, int * b, int * c) {
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
__global__ void addArrayLong(int * a, int * b, int * c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
//the while loop
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
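// Hedged note on the grid-stride loop above: with the 1024 x 1024 launch used in
// doAddArrayLong below, one sweep covers 1,048,576 indices, so each thread handles
// roughly N / (blockDim.x * gridDim.x) elements when N exceeds the grid size.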
void doAdd(int a, int b) {
int * dev_c;
int c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
std::cout << "The result of " << a << "+" << b << " is " << c << std::endl;
hipFree(dev_c);
}
void doAddArray(int * a, int * b) {
int * dev_a;
int * dev_b;
int * dev_c;
int c[N] = {0};
const size_t size = N * sizeof(int);
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_a, size));
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_b, size));
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_c, size));
CUDA_CHECK_ERROR(hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( addArray), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
CUDA_CHECK_ERROR(hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost));
for (int i = 0; i < N; ++i)
std::cout << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
CUDA_CHECK_ERROR(hipFree(dev_a));
CUDA_CHECK_ERROR(hipFree(dev_b));
CUDA_CHECK_ERROR(hipFree(dev_c));
}
void doAddArrayLong(int * a, int * b) {
int * dev_a;
int * dev_b;
int * dev_c;
int c[N] = {0};
const size_t size = N * sizeof(int);
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_a, size));
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_b, size));
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_c, size));
CUDA_CHECK_ERROR(hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice));
CUDA_CHECK_ERROR(hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice));
//can't exceed maxThreadsPerBlock
hipLaunchKernelGGL(( addArrayLong), dim3(1024), dim3(1024), 0, 0, dev_a, dev_b, dev_c);
CUDA_CHECK_ERROR(hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost));
bool success = true;
for (int i = 0; i < N; ++i) {
if (c[i] != a[i] + b[i]) {
std::cerr << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
success = false;
}
}
if (success)
std::cout << "successfully compute!" << std::endl;
CUDA_CHECK_ERROR(hipFree(dev_a));
CUDA_CHECK_ERROR(hipFree(dev_b));
CUDA_CHECK_ERROR(hipFree(dev_c));
CUDA_CHECK_ERROR(hipDeviceReset());
}
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 0.9;
float jx = scale * (float)(DIM/2 - x) / (DIM / 2);
float jy = scale * (float)(DIM/2 - y) / (DIM / 2);
hipComplex c(-0.91, 0.125);
hipComplex a(jx, jy);
for (int i = 0; i < 200; ++i) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
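// Hedged note: the loop above iterates a = a * a + c for the Julia set test; a
// point whose squared magnitude exceeds 1000 within 200 iterations is treated as
// escaping (returns 0), everything else is assumed to lie in the set (returns 1).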
__global__ void JuliaKernel(unsigned char * ptr) {
//int x = blockIdx.x;
//int y = blockIdx.y;
//int offset = x + y * gridDim.x;
int x = threadIdx.x;
int y = threadIdx.y;
int offset = x + y * blockDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 100 * juliaValue;
ptr[offset * 4 + 1] = 250;
ptr[offset * 4 + 2] = 20;
ptr[offset * 4 + 3] = 1;
}
void DrawJulia(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char * dev_bitmap;
CUDA_CHECK_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( JuliaKernel), dim3(1), dim3(grid), 0, 0, dev_bitmap);
CUDA_CHECK_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
bitmap.display_and_exit();
CUDA_CHECK_ERROR(hipFree(dev_bitmap));
CUDA_CHECK_ERROR(hipDeviceReset());
}
| a524a5ef8f6c70b47ba150f300d07f676807429b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include "../gl_helper.h"
#include "../cpu_bitmap.h"
#include "tools.hpp"
__global__ void add(int a, int b, int * c) {
*c = a + b;
}
__global__ void addArray(int * a, int * b, int * c) {
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
__global__ void addArrayLong(int * a, int * b, int * c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
//the while loop
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
void doAdd(int a, int b) {
int * dev_c;
int c;
cudaMalloc((void**)&dev_c, sizeof(int));
add<<<1, 1>>>(a, b, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "The result of " << a << "+" << b << " is " << c << std::endl;
cudaFree(dev_c);
}
void doAddArray(int * a, int * b) {
int * dev_a;
int * dev_b;
int * dev_c;
int c[N] = {0};
const size_t size = N * sizeof(int);
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_a, size));
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_b, size));
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_c, size));
CUDA_CHECK_ERROR(cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice));
addArray<<<N, 1>>>(dev_a, dev_b, dev_c);
CUDA_CHECK_ERROR(cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost));
for (int i = 0; i < N; ++i)
std::cout << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
CUDA_CHECK_ERROR(cudaFree(dev_a));
CUDA_CHECK_ERROR(cudaFree(dev_b));
CUDA_CHECK_ERROR(cudaFree(dev_c));
}
void doAddArrayLong(int * a, int * b) {
int * dev_a;
int * dev_b;
int * dev_c;
int c[N] = {0};
const size_t size = N * sizeof(int);
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_a, size));
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_b, size));
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_c, size));
CUDA_CHECK_ERROR(cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice));
//can't exceed maxThreadsPerBlock
addArrayLong<<<1024, 1024>>>(dev_a, dev_b, dev_c);
CUDA_CHECK_ERROR(cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost));
bool success = true;
for (int i = 0; i < N; ++i) {
if (c[i] != a[i] + b[i]) {
std::cerr << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
success = false;
}
}
if (success)
std::cout << "successfully compute!" << std::endl;
CUDA_CHECK_ERROR(cudaFree(dev_a));
CUDA_CHECK_ERROR(cudaFree(dev_b));
CUDA_CHECK_ERROR(cudaFree(dev_c));
CUDA_CHECK_ERROR(cudaDeviceReset());
}
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 0.9;
float jx = scale * (float)(DIM/2 - x) / (DIM / 2);
float jy = scale * (float)(DIM/2 - y) / (DIM / 2);
cuComplex c(-0.91, 0.125);
cuComplex a(jx, jy);
for (int i = 0; i < 200; ++i) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void JuliaKernel(unsigned char * ptr) {
//int x = blockIdx.x;
//int y = blockIdx.y;
//int offset = x + y * gridDim.x;
int x = threadIdx.x;
int y = threadIdx.y;
int offset = x + y * blockDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 100 * juliaValue;
ptr[offset * 4 + 1] = 250;
ptr[offset * 4 + 2] = 20;
ptr[offset * 4 + 3] = 1;
}
void DrawJulia(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char * dev_bitmap;
CUDA_CHECK_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
JuliaKernel<<<1, grid>>>(dev_bitmap);
CUDA_CHECK_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
CUDA_CHECK_ERROR(cudaFree(dev_bitmap));
CUDA_CHECK_ERROR(cudaDeviceReset());
}
|
16bd3cfe59ef7499dab529b0a1b3e0b3a689c771.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
// const int OPT_N = 4000000;
// const int NUM_ITERATIONS = 512;
#define OPT_N 4000000
#define NUM_ITERATIONS 512
// const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
// double delta, ref, sum_delta, sum_ref, max_delta, L1norm;
double gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
// determine how many options to process
int opt_n = OPT_N;
if (checkCmdLineFlag(argc, (const char**) argv, "options"))
{
opt_n = getCmdLineArgumentInt(argc, (const char**)argv, "options");
if (opt_n < 1) {
printf("Error: \"number of options\" specified %d is invalid\n", opt_n);
exit(EXIT_FAILURE);
}
}
const int opt_sz = opt_n * sizeof(float);
// how many benchmarking iterations
int num_iterations = NUM_ITERATIONS;
if (checkCmdLineFlag(argc, (const char**) argv, "samples"))
{
num_iterations = getCmdLineArgumentInt(argc, (const char**)argv, "samples");
if (opt_n < 1) {
printf("Error: \"number of benchmark samples\" specified %d is invalid\n", num_iterations);
exit(EXIT_FAILURE);
}
}
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(opt_sz);
h_PutResultCPU = (float *)malloc(opt_sz);
h_CallResultGPU = (float *)malloc(opt_sz);
h_PutResultGPU = (float *)malloc(opt_sz);
h_StockPrice = (float *)malloc(opt_sz);
h_OptionStrike = (float *)malloc(opt_sz);
h_OptionYears = (float *)malloc(opt_sz);
printf("...allocating GPU memory for options.\n");
// checkCudaErrors(hipMalloc((void **)&d_CallResult, opt_sz));
// checkCudaErrors(hipMalloc((void **)&d_PutResult, opt_sz));
checkCudaErrors(hipMalloc((void **)&d_StockPrice, opt_sz));
checkCudaErrors(hipMalloc((void **)&d_OptionStrike, opt_sz));
checkCudaErrors(hipMalloc((void **)&d_OptionYears, opt_sz));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < opt_n; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_StockPrice, h_StockPrice, opt_sz, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionStrike, h_OptionStrike, opt_sz, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionYears, h_OptionYears, opt_sz, hipMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", num_iterations);
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < num_iterations; i++)
{
checkCudaErrors(hipMalloc((void **)&d_CallResult, opt_sz)); // TLM
checkCudaErrors(hipMalloc((void **)&d_PutResult, opt_sz)); // TLM
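// Note (hedged): because these allocations and the matching hipFree calls sit
// inside the timed loop, the reported per-iteration time includes hipMalloc/hipFree
// overhead on top of the kernel launch itself.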
hipLaunchKernelGGL(( BlackScholesGPU), dim3(480), dim3(128), 0, 0,
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
opt_n
);
getLastCudaError("BlackScholesGPU() execution failed\n");
checkCudaErrors(hipFree(d_PutResult)); // TLM
checkCudaErrors(hipFree(d_CallResult)); // TLM
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / num_iterations;
//Both call and put is calculated
printf("Options count : %i\n", opt_n);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * opt_n * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f\n\n", ((double)(2 * opt_n) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * opt_n) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * opt_n), 1, 128);
#if 0
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, opt_sz, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, opt_sz, hipMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
opt_n
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < opt_n; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
#endif
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(hipFree(d_OptionYears));
checkCudaErrors(hipFree(d_OptionStrike));
checkCudaErrors(hipFree(d_StockPrice));
// checkCudaErrors(hipFree(d_PutResult));
// checkCudaErrors(hipFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
hipDeviceReset();
printf("Shutdown done.\n");
#if 0
printf("\n[BlackScholes] - Test Summary\n");
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
#endif
}
| 16bd3cfe59ef7499dab529b0a1b3e0b3a689c771.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
// const int OPT_N = 4000000;
// const int NUM_ITERATIONS = 512;
#define OPT_N 4000000
#define NUM_ITERATIONS 512
// const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
// double delta, ref, sum_delta, sum_ref, max_delta, L1norm;
double gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
// determine how many options to process
int opt_n = OPT_N;
if (checkCmdLineFlag(argc, (const char**) argv, "options"))
{
opt_n = getCmdLineArgumentInt(argc, (const char**)argv, "options");
if (opt_n < 1) {
printf("Error: \"number of options\" specified %d is invalid\n", opt_n);
exit(EXIT_FAILURE);
}
}
const int opt_sz = opt_n * sizeof(float);
// how many benchmarking iterations
int num_iterations = NUM_ITERATIONS;
if (checkCmdLineFlag(argc, (const char**) argv, "samples"))
{
num_iterations = getCmdLineArgumentInt(argc, (const char**)argv, "samples");
if (opt_n < 1) {
printf("Error: \"number of benchmark samples\" specified %d is invalid\n", num_iterations);
exit(EXIT_FAILURE);
}
}
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(opt_sz);
h_PutResultCPU = (float *)malloc(opt_sz);
h_CallResultGPU = (float *)malloc(opt_sz);
h_PutResultGPU = (float *)malloc(opt_sz);
h_StockPrice = (float *)malloc(opt_sz);
h_OptionStrike = (float *)malloc(opt_sz);
h_OptionYears = (float *)malloc(opt_sz);
printf("...allocating GPU memory for options.\n");
// checkCudaErrors(cudaMalloc((void **)&d_CallResult, opt_sz));
// checkCudaErrors(cudaMalloc((void **)&d_PutResult, opt_sz));
checkCudaErrors(cudaMalloc((void **)&d_StockPrice, opt_sz));
checkCudaErrors(cudaMalloc((void **)&d_OptionStrike, opt_sz));
checkCudaErrors(cudaMalloc((void **)&d_OptionYears, opt_sz));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < opt_n; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_StockPrice, h_StockPrice, opt_sz, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionStrike, h_OptionStrike, opt_sz, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionYears, h_OptionYears, opt_sz, cudaMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", num_iterations);
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < num_iterations; i++)
{
checkCudaErrors(cudaMalloc((void **)&d_CallResult, opt_sz)); // TLM
checkCudaErrors(cudaMalloc((void **)&d_PutResult, opt_sz)); // TLM
BlackScholesGPU<<<480, 128>>>(
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
opt_n
);
getLastCudaError("BlackScholesGPU() execution failed\n");
checkCudaErrors(cudaFree(d_PutResult)); // TLM
checkCudaErrors(cudaFree(d_CallResult)); // TLM
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / num_iterations;
//Both call and put is calculated
printf("Options count : %i\n", opt_n);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * opt_n * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f\n\n", ((double)(2 * opt_n) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * opt_n) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * opt_n), 1, 128);
#if 0
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, opt_sz, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, opt_sz, cudaMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
opt_n
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < opt_n; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
#endif
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(cudaFree(d_OptionYears));
checkCudaErrors(cudaFree(d_OptionStrike));
checkCudaErrors(cudaFree(d_StockPrice));
// checkCudaErrors(cudaFree(d_PutResult));
// checkCudaErrors(cudaFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
cudaDeviceReset();
printf("Shutdown done.\n");
#if 0
printf("\n[BlackScholes] - Test Summary\n");
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
#endif
}
|
2749223df177c5f5e1b160b6e707a66b36a4cbde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__ void kernel( int* b, int* t)
{
if( !threadIdx.x)
{
b[blockIdx.x] = blockIdx.x;
}
t[blockDim.x *blockIdx.x + threadIdx.x] = threadIdx.x;
}
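// Hedged note on expected output: with numBlocks = 4 and threadsPerBlock = 8 as
// set in main() below, b ends up as {0, 1, 2, 3} and each block's 8-entry slice of
// t holds 0..7, which is what the host-side print loop reports.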
int main()
{
int numBlocks = 4;
int threadsPerBlock = 8;
int numThreads = numBlocks*threadsPerBlock;
int* b;
int* t;
// allocate b and t locally on host
b = new int[numBlocks];
t = new int[numThreads];
int* d_b;
int* d_t;
// allocate d_b and d_t on the device
hipMalloc( (void**)&d_b, numBlocks*sizeof(int));
hipMalloc( (void**)&d_t, numThreads*sizeof(int));
hipLaunchKernelGGL(( kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_b,d_t);
// communicate values from device to host
hipMemcpy( b, d_b, numBlocks*sizeof(int)
, hipMemcpyDeviceToHost);
hipMemcpy( t, d_t, numThreads*sizeof(int)
, hipMemcpyDeviceToHost);
// free the memory allocated on the device
hipFree(d_b);
hipFree(d_t);
// output results
int block;
int thread;
for( block=0; block<numBlocks; block++)
{
cout << "b[" << block << "] = " << b[block];
cout << "; threads:";
for( thread=0; thread<threadsPerBlock; thread++)
{
cout << " " << t[block*threadsPerBlock + thread];
}
cout << endl;
}
// delete the locally allocate memory
delete [] t;
delete [] b;
return 0;
}
| 2749223df177c5f5e1b160b6e707a66b36a4cbde.cu |
#include <iostream>
using namespace std;
__global__ void kernel( int* b, int* t)
{
if( !threadIdx.x)
{
b[blockIdx.x] = blockIdx.x;
}
t[blockDim.x *blockIdx.x + threadIdx.x] = threadIdx.x;
}
int main()
{
int numBlocks = 4;
int threadsPerBlock = 8;
int numThreads = numBlocks*threadsPerBlock;
int* b;
int* t;
// allocate b and t locally on host
b = new int[numBlocks];
t = new int[numThreads];
int* d_b;
int* d_t;
// allocate d_b and d_t on the device
cudaMalloc( (void**)&d_b, numBlocks*sizeof(int));
cudaMalloc( (void**)&d_t, numThreads*sizeof(int));
kernel<<<numBlocks,threadsPerBlock>>>(d_b,d_t);
// communicate values from device to host
cudaMemcpy( b, d_b, numBlocks*sizeof(int)
, cudaMemcpyDeviceToHost);
cudaMemcpy( t, d_t, numThreads*sizeof(int)
, cudaMemcpyDeviceToHost);
// free the memory allocated on the device
cudaFree(d_b);
cudaFree(d_t);
// output results
int block;
int thread;
for( block=0; block<numBlocks; block++)
{
cout << "b[" << block << "] = " << b[block];
cout << "; threads:";
for( thread=0; thread<threadsPerBlock; thread++)
{
cout << " " << t[block*threadsPerBlock + thread];
}
cout << endl;
}
// delete the locally allocate memory
delete [] t;
delete [] b;
return 0;
}
|
58848c1ea99569e4b9b7e1df2a0ab21c72cfef3d.hip | // !!! This is a file automatically generated by hipify!!!
#define HELLO
/******************************************************************************
/* @file Playground kernel function. Have fun!
/*
/*
/* @author langenhagen
/* @version YYMMDD
/******************************************************************************/
#pragma once
///////////////////////////////////////////////////////////////////////////////
// INCLUDES project headers
///////////////////////////////////////////////////////////////////////////////
//INCLUDES C/C++ standard library (and other external libraries)
#include <cstdlib>
#include <iostream>
#include <math.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/sort.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
//#include <barn_common.hpp> // << provokes linker errors!
#include <matrix.cuh>
#include <barn_cuda_common.cuh>
///////////////////////////////////////////////////////////////////////////////
// DEFINES and MACROS
///////////////////////////////////////////////////////////////////////////////
// NAMESPACE, CONSTANTS and TYPE DECLARATIONS/IMPLEMENTATIONS
using namespace std;
using namespace thrust;
typedef unsigned char uchar;
__global__
void play_kernel( Matrix<int>* m,
const int Kx,
const int Ky) {
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
if( tx+1 > m->n_cells())
return;
const int r = tx / m->cols;
const int c = tx - r*m->cols;
const int k_r = r * Ky / m->rows;
const int k_c = c * Kx / m->cols;
m->at(r,c) = k_r * Kx + k_c;
}
struct row_calculator : public thrust::unary_function<unsigned int /*input*/, unsigned int /*output*/> {
unsigned int _n_cols;
row_calculator( unsigned int n_cols) : _n_cols(n_cols)
{}
__host__ __device__
unsigned int operator()(const unsigned int &i) const {
return i/_n_cols;
}
};
struct col_calculator : public thrust::unary_function<unsigned int, unsigned int> {
unsigned int _n_cols;
col_calculator( unsigned int n_cols) : _n_cols(n_cols)
{}
__host__ __device__
unsigned int operator()(const unsigned int &i) const {
unsigned int r = i/_n_cols;
return i - r*_n_cols;
}
};
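// Hedged worked example: with _n_cols = 10, linear index i = 23 maps to
// row_calculator(23) = 23 / 10 = 2 and col_calculator(23) = 23 - 2 * 10 = 3,
// i.e. row 2, column 3 of the row-major matrix.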
void play() {
Matrix<int> segmat( 25, 10);
Matrix<int> *d_segmat = segmat.h2d();
// create segment mat
hipLaunchKernelGGL(( play_kernel), dim3((segmat.rows*segmat.cols-1)/1024+1), dim3(1024), 0, 0, d_segmat, 5, 2);
segmat.d2h( d_segmat, false);
device_vector<int> segvec_d( segmat.data, segmat.data+segmat.n_cells()); // aka keys
for( auto it=segvec_d.begin(); it!=segvec_d.end(); ++it)
cout << *it << "\t";
// constant iterator for counting the segment sizes later
constant_iterator<int> const_beg_it(1);
constant_iterator<int> const_beg_end( const_beg_it + segmat.n_cells());
cout << "const iter: \n"
"const_beg_it[0]: " << const_beg_it[0] << "\n"
"const_beg_it[1]: " << const_beg_it[1] << "\n"
"const_beg_it[2]: " << const_beg_it[2] << "\n";
// helper gadget for the row/col iterators: counts through all cells _and_is_modifiable_
device_vector<unsigned int> value_mappings( segmat.rows*segmat.cols);
sequence(value_mappings.begin(), value_mappings.end());
/*
// helper iterator for the row/col iterators: counts through all data cells
counting_iterator<int> cnt_beg_it(0);
counting_iterator<int> cnt_end_it( cnt_beg_it + segmat.n_cells());
cout << "counting iter: \n"
"cnt_beg_it[0]: " << cnt_beg_it[0] << "\n"
"cnt_beg_it[1]: " << cnt_beg_it[1] << "\n"
"cnt_beg_it[2]: " << cnt_beg_it[2] << "\n";*/
// retrieves row / column for given data element
// typedef transform_iterator< row_calculator, thrust::counting_iterator<int>> row_it_t;
// typedef transform_iterator< col_calculator, thrust::counting_iterator<int>> col_it_t;
auto row_beg_it = make_transform_iterator( counting_iterator<int>(0), row_calculator( segmat.cols));
auto row_end_it = make_transform_iterator( value_mappings.end(), row_calculator( segmat.cols));
auto col_beg_it = make_transform_iterator( counting_iterator<int>(0), col_calculator( segmat.cols));
auto col_end_it = make_transform_iterator( value_mappings.end(), col_calculator( segmat.cols));
cout << "row iter (with " << segmat.cols << " cols): \n"
"row_beg_it[ 0]: " << row_beg_it[ 0] << "\n"
"row_beg_it[ 1]: " << row_beg_it[ 1] << "\n"
"row_beg_it[ 2]: " << row_beg_it[ 2] << "\n"
"row_beg_it[12]: " << row_beg_it[12] << "\n"
"row_beg_it[17]: " << row_beg_it[17] << "\n"
"row_beg_it[29]: " << row_beg_it[29] << "\n";
cout << "col iter (with " << segmat.cols << " cols): \n"
"col_beg_it[ 0]: " << col_beg_it[ 0] << "\n"
"col_beg_it[ 1]: " << col_beg_it[ 1] << "\n"
"col_beg_it[ 2]: " << col_beg_it[ 2] << "\n"
"col_beg_it[12]: " << col_beg_it[12] << "\n"
"col_beg_it[17]: " << col_beg_it[17] << "\n"
"col_beg_it[29]: " << col_beg_it[29] << "\n";
/*
auto values_beg_it = make_zip_iterator( make_tuple( row_beg_it, col_beg_it));
auto values_end_it = make_zip_iterator( make_tuple( row_end_it, col_end_it));
*/
sort_by_key(segvec_d.begin(), segvec_d.end(), value_mappings.begin());
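// Hedged note: sorting the segment ids with value_mappings as the values means
// that, after the sort, value_mappings[i] is the original linear cell index of the
// i-th sorted entry, which the row/col transform iterators below convert back into
// (row, col) coordinates.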
for( int i=0; i<segmat.rows*segmat.cols; ++i) {
cout << "i: " << i << " segment: " << segvec_d[i] << " row: " << row_beg_it[value_mappings[i]] << " col: " << col_beg_it[value_mappings[i]] << "\n";
}
}
__global__
void play_kernel2() {
}
void play2() {
}
///////////////////////////////////////////////////////////////////////////////
// WARMUP KERNEL AND INVOCATION FUNCTION
__global__
void warmup_kernel() {
printf( "Warming up... ");
}
void warmup() {
hipLaunchKernelGGL(( warmup_kernel), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
printf("Done.\n");
}
///////////////////////////////////////////////////////////////////////////////
//struct myfunctor {
//
// unsigned char x;
//
// myfunctor( unsigned char _x) : x(_x)
// {}
//
// __host__ __device__
// unsigned char operator()(const unsigned char &c) const {
// return x-c;
// }
//}; | 58848c1ea99569e4b9b7e1df2a0ab21c72cfef3d.cu | #define HELLO
/******************************************************************************
/* @file Playground kernel function. Have fun!
/*
/*
/* @author langenhagen
/* @version YYMMDD
/******************************************************************************/
#pragma once
///////////////////////////////////////////////////////////////////////////////
// INCLUDES project headers
///////////////////////////////////////////////////////////////////////////////
//INCLUDES C/C++ standard library (and other external libraries)
#include <cstdlib>
#include <iostream>
#include <math.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/sort.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
//#include <barn_common.hpp> // << provokes linker errors!
#include <matrix.cuh>
#include <barn_cuda_common.cuh>
///////////////////////////////////////////////////////////////////////////////
// DEFINES and MACROS
///////////////////////////////////////////////////////////////////////////////
// NAMESPACE, CONSTANTS and TYPE DECLARATIONS/IMPLEMENTATIONS
using namespace std;
using namespace thrust;
typedef unsigned char uchar;
__global__
void play_kernel( Matrix<int>* m,
const int Kx,
const int Ky) {
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
if( tx+1 > m->n_cells())
return;
const int r = tx / m->cols;
const int c = tx - r*m->cols;
const int k_r = r * Ky / m->rows;
const int k_c = c * Kx / m->cols;
m->at(r,c) = k_r * Kx + k_c;
}
struct row_calculator : public thrust::unary_function<unsigned int /*input*/, unsigned int /*output*/> {
unsigned int _n_cols;
row_calculator( unsigned int n_cols) : _n_cols(n_cols)
{}
__host__ __device__
unsigned int operator()(const unsigned int &i) const {
return i/_n_cols;
}
};
struct col_calculator : public thrust::unary_function<unsigned int, unsigned int> {
unsigned int _n_cols;
col_calculator( unsigned int n_cols) : _n_cols(n_cols)
{}
__host__ __device__
unsigned int operator()(const unsigned int &i) const {
unsigned int r = i/_n_cols;
return i - r*_n_cols;
}
};
void play() {
Matrix<int> segmat( 25, 10);
Matrix<int> *d_segmat = segmat.h2d();
// create segment mat
play_kernel<<< (segmat.rows*segmat.cols-1)/1024+1, 1024>>>( d_segmat, 5, 2);
segmat.d2h( d_segmat, false);
device_vector<int> segvec_d( segmat.data, segmat.data+segmat.n_cells()); // aka keys
for( auto it=segvec_d.begin(); it!=segvec_d.end(); ++it)
cout << *it << "\t";
// constant iterator for counting the segment sizes later
constant_iterator<int> const_beg_it(1);
constant_iterator<int> const_beg_end( const_beg_it + segmat.n_cells());
cout << "const iter: \n"
"const_beg_it[0]: " << const_beg_it[0] << "\n"
"const_beg_it[1]: " << const_beg_it[1] << "\n"
"const_beg_it[2]: " << const_beg_it[2] << "\n";
// helper gadget for the row/col iterators: counts through all cells _and_is_modifiable_
device_vector<unsigned int> value_mappings( segmat.rows*segmat.cols);
sequence(value_mappings.begin(), value_mappings.end());
/*
// helper iterator for the row/col iterators: counts through all data cells
counting_iterator<int> cnt_beg_it(0);
counting_iterator<int> cnt_end_it( cnt_beg_it + segmat.n_cells());
cout << "counting iter: \n"
"cnt_beg_it[0]: " << cnt_beg_it[0] << "\n"
"cnt_beg_it[1]: " << cnt_beg_it[1] << "\n"
"cnt_beg_it[2]: " << cnt_beg_it[2] << "\n";*/
// retrieves row / column for given data element
// typedef transform_iterator< row_calculator, thrust::counting_iterator<int>> row_it_t;
// typedef transform_iterator< col_calculator, thrust::counting_iterator<int>> col_it_t;
auto row_beg_it = make_transform_iterator( counting_iterator<int>(0), row_calculator( segmat.cols));
auto row_end_it = make_transform_iterator( value_mappings.end(), row_calculator( segmat.cols));
auto col_beg_it = make_transform_iterator( counting_iterator<int>(0), col_calculator( segmat.cols));
auto col_end_it = make_transform_iterator( value_mappings.end(), col_calculator( segmat.cols));
cout << "row iter (with " << segmat.cols << " cols): \n"
"row_beg_it[ 0]: " << row_beg_it[ 0] << "\n"
"row_beg_it[ 1]: " << row_beg_it[ 1] << "\n"
"row_beg_it[ 2]: " << row_beg_it[ 2] << "\n"
"row_beg_it[12]: " << row_beg_it[12] << "\n"
"row_beg_it[17]: " << row_beg_it[17] << "\n"
"row_beg_it[29]: " << row_beg_it[29] << "\n";
cout << "col iter (with " << segmat.cols << " cols): \n"
"col_beg_it[ 0]: " << col_beg_it[ 0] << "\n"
"col_beg_it[ 1]: " << col_beg_it[ 1] << "\n"
"col_beg_it[ 2]: " << col_beg_it[ 2] << "\n"
"col_beg_it[12]: " << col_beg_it[12] << "\n"
"col_beg_it[17]: " << col_beg_it[17] << "\n"
"col_beg_it[29]: " << col_beg_it[29] << "\n";
/*
auto values_beg_it = make_zip_iterator( make_tuple( row_beg_it, col_beg_it));
auto values_end_it = make_zip_iterator( make_tuple( row_end_it, col_end_it));
*/
sort_by_key(segvec_d.begin(), segvec_d.end(), value_mappings.begin());
for( int i=0; i<segmat.rows*segmat.cols; ++i) {
cout << "i: " << i << " segment: " << segvec_d[i] << " row: " << row_beg_it[value_mappings[i]] << " col: " << col_beg_it[value_mappings[i]] << "\n";
}
}
__global__
void play_kernel2() {
}
void play2() {
}
///////////////////////////////////////////////////////////////////////////////
// WARMUP KERNEL AND INVOCATION FUNCTION
__global__
void warmup_kernel() {
printf( "Warming up... ");
}
void warmup() {
warmup_kernel<<<1,1>>>();
cudaDeviceSynchronize();
printf("Done.\n");
}
///////////////////////////////////////////////////////////////////////////////
//struct myfunctor {
//
// unsigned char x;
//
// myfunctor( unsigned char _x) : x(_x)
// {}
//
// __host__ __device__
// unsigned char operator()(const unsigned char &c) const {
// return x-c;
// }
//}; |
4084bdaa555ff0ec0838c2f5813f188d232227db.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
int ind;
// move array pointers to correct position
// version 1
// ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
// version 2
ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
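// Version 1 strides by blockDim.x, so at each loop step consecutive threads read
// consecutive floats (coalesced). Version 2 gives each thread its own contiguous
// block of 2*N floats, so neighbouring threads are 2*N elements apart per access.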
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n=0; n<N; n++) {
y1 = d_z[ind];
// version 1
//ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
printf(" ind in first step and d_z = %d %f \n",ind,d_z[ind]);
y2 = rho*y1 + alpha*d_z[ind];
// version 1
//ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if ( fabs(s1-1.0f)<0.1f && fabs(s2-1.0f)<0.1f ) payoff = exp(-r*T);
d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// int NPATH=960000, h_N=100;
int NPATH=100, h_N=100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
checkCudaErrors( hipMalloc((void **)&d_v, sizeof(float)*NPATH) );
checkCudaErrors( hipMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f-h_rho*h_rho);
h_dt = 1.0f/h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
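// con1/con2 encode the Euler step of correlated geometric Brownian motion:
// s_{n+1} = s_n*(1 + r*dt + sigma*sqrt(dt)*y_n), with y2 = rho*y1 + sqrt(1-rho^2)*z.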
checkCudaErrors( hipMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
checkCudaErrors( hipMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
checkCudaErrors( hipMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
checkCudaErrors( hipMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
checkCudaErrors( hipMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
checkCudaErrors( hipMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
checkCudaErrors( hipMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
checkCudaErrors( hipMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
checkCudaErrors( hipMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
// random number generation
hipEventRecord(start);
hiprandGenerator_t gen;
checkCudaErrors( hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT) );
checkCudaErrors( hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
checkCudaErrors( hiprandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
milli, 2.0*h_N*NPATH/(0.001*milli));
// execute kernel and time it
hipEventRecord(start);
hipLaunchKernelGGL(( pathcalc), dim3(NPATH/64), dim3(64), 0, 0, d_z, d_v);
getLastCudaError("pathcalc execution failed\n");
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("Monte Carlo kernel execution time (ms): %f \n",milli);
// copy back results
checkCudaErrors( hipMemcpy(h_v, d_v, sizeof(float)*NPATH,
hipMemcpyDeviceToHost) );
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i=0; i<NPATH; i++) {
sum1 += h_v[i];
sum2 += h_v[i]*h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
// Tidy up library
checkCudaErrors( hiprandDestroyGenerator(gen) );
// Release memory and exit cleanly
free(h_v);
checkCudaErrors( hipFree(d_v) );
checkCudaErrors( hipFree(d_z) );
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
}
| 4084bdaa555ff0ec0838c2f5813f188d232227db.cu |
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
int ind;
// move array pointers to correct position
// version 1
// ind = threadIdx.x + 2*N*blockIdx.x*blockDim.x;
// version 2
ind = 2*N*threadIdx.x + 2*N*blockIdx.x*blockDim.x;
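// Version 1 strides by blockDim.x, so at each loop step consecutive threads read
// consecutive floats (coalesced). Version 2 gives each thread its own contiguous
// block of 2*N floats, so neighbouring threads are 2*N elements apart per access.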
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n=0; n<N; n++) {
y1 = d_z[ind];
// version 1
//ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
printf(" ind in first step and d_z = %d %f \n",ind,d_z[ind]);
y2 = rho*y1 + alpha*d_z[ind];
// version 1
//ind += blockDim.x; // shift pointer to next element
// version 2
ind += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if ( fabs(s1-1.0f)<0.1f && fabs(s2-1.0f)<0.1f ) payoff = exp(-r*T);
d_v[threadIdx.x + blockIdx.x*blockDim.x] = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// int NPATH=960000, h_N=100;
int NPATH=100, h_N=100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
checkCudaErrors( cudaMalloc((void **)&d_v, sizeof(float)*NPATH) );
checkCudaErrors( cudaMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) );
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f-h_rho*h_rho);
h_dt = 1.0f/h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
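// con1/con2 encode the Euler step of correlated geometric Brownian motion:
// s_{n+1} = s_n*(1 + r*dt + sigma*sqrt(dt)*y_n), with y2 = rho*y1 + sqrt(1-rho^2)*z.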
checkCudaErrors( cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)) );
checkCudaErrors( cudaMemcpyToSymbol(T, &h_T, sizeof(h_T)) );
checkCudaErrors( cudaMemcpyToSymbol(r, &h_r, sizeof(h_r)) );
checkCudaErrors( cudaMemcpyToSymbol(sigma,&h_sigma,sizeof(h_sigma)) );
checkCudaErrors( cudaMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)) );
checkCudaErrors( cudaMemcpyToSymbol(alpha,&h_alpha,sizeof(h_alpha)) );
checkCudaErrors( cudaMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)) );
checkCudaErrors( cudaMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)) );
checkCudaErrors( cudaMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)) );
// random number generation
cudaEventRecord(start);
curandGenerator_t gen;
checkCudaErrors( curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT) );
checkCudaErrors( curandSetPseudoRandomGeneratorSeed(gen, 1234ULL) );
checkCudaErrors( curandGenerateNormal(gen, d_z, 2*h_N*NPATH, 0.0f, 1.0f) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",
milli, 2.0*h_N*NPATH/(0.001*milli));
// execute kernel and time it
cudaEventRecord(start);
pathcalc<<<NPATH/64, 64>>>(d_z, d_v);
getLastCudaError("pathcalc execution failed\n");
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("Monte Carlo kernel execution time (ms): %f \n",milli);
// copy back results
checkCudaErrors( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
cudaMemcpyDeviceToHost) );
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i=0; i<NPATH; i++) {
sum1 += h_v[i];
sum2 += h_v[i]*h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1/NPATH, sqrt((sum2/NPATH - (sum1/NPATH)*(sum1/NPATH))/NPATH) );
// Tidy up library
checkCudaErrors( curandDestroyGenerator(gen) );
// Release memory and exit cleanly
free(h_v);
checkCudaErrors( cudaFree(d_v) );
checkCudaErrors( cudaFree(d_z) );
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
}
|
7622ab01df6262ae2b0d82e68f78c350ce975272.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include <math_functions.h>
#include "thrust/device_vector.h"
#include "caffe/common.hpp"
#include "caffe/sequence_layers.hpp"
#ifdef USE_MPI
#include "caffe/mpitask.hpp"
#endif
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void lstm_copy_indicator(const int count, const int output_feature_dim_,
const Dtype * cont_t, const Dtype * src, Dtype * dst) {
CUDA_KERNEL_LOOP(i, count) {
const int b = i / output_feature_dim_;
dst[i] = (cont_t[b] > 0) ? src[i] : Dtype(0);
}
}
template <typename Dtype>
__global__ void lstm_forward_kernel(const int count, const int output_feature_dim_,
Dtype * gates, Dtype * h, Dtype * c,
const Dtype * c_prev) {
CUDA_KERNEL_LOOP(index, count) {
const int index_batch = index / output_feature_dim_,
index_feature = index % output_feature_dim_;
const int offset = index_batch * SLLSTMLayer<Dtype>::NumOfGates
* output_feature_dim_ + index_feature;
const int fi = SLLSTMLayer<Dtype>::I * output_feature_dim_ + offset,
ff = SLLSTMLayer<Dtype>::F * output_feature_dim_ + offset,
fo = SLLSTMLayer<Dtype>::O * output_feature_dim_ + offset,
fg = SLLSTMLayer<Dtype>::G * output_feature_dim_ + offset;
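// Standard LSTM cell update: gates i, f, o are squashed with the logistic
// sigmoid, the candidate g with tanh, then c = i*g + f*c_prev and h = o*tanh(c).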
gates[fi] = sigmoid(gates[fi]);
gates[ff] = sigmoid(gates[ff]);
gates[fo] = sigmoid(gates[fo]);
gates[fg] = tanh(gates[fg]);
c[index] = gates[fi] * gates[fg] + gates[ff] * c_prev[index];
h[index] = gates[fo] * tanh(c[index]);
}
}
template <typename Dtype>
__global__ void lstm_backward_kernel(const int batch, const int output_feature_dim_,
const Dtype * gates, Dtype * gates_diff,
const Dtype * c, const Dtype * c_diff,
const Dtype * c_prev, Dtype * c_backpropagate,
const Dtype * h_diff) {
CUDA_KERNEL_LOOP(index, batch * output_feature_dim_) {
const int index_batch = index / output_feature_dim_,
index_feature = index % output_feature_dim_;
const int offset = index_batch * SLLSTMLayer<Dtype>::NumOfGates
* output_feature_dim_ + index_feature;
const int fi = SLLSTMLayer<Dtype>::I * output_feature_dim_ + offset,
ff = SLLSTMLayer<Dtype>::F * output_feature_dim_ + offset,
fo = SLLSTMLayer<Dtype>::O * output_feature_dim_ + offset,
fg = SLLSTMLayer<Dtype>::G * output_feature_dim_ + offset;
const Dtype tanhc = tanh(c[index]);
gates_diff[fo] = tanhc * h_diff[index];
Dtype c_term_diff = c_diff[index] + (1 - tanhc * tanhc)
* gates[fo] * h_diff[index];
gates_diff[ff] = c_prev[index] * c_term_diff;
c_backpropagate[index] = gates[ff] * c_term_diff;
gates_diff[fi] = gates[fg] * c_term_diff;
gates_diff[fg] = gates[fi] * c_term_diff;
}
}
template <typename Dtype>
__global__ void lstm_acts_backward(const int count, const int output_feature_dim_,
const Dtype * gates, Dtype * gates_diff){
const int x_dim = SLLSTMLayer<Dtype>::NumOfGates * output_feature_dim_;
CUDA_KERNEL_LOOP(index, count) {
const int d = index % x_dim;
const Dtype x_act = gates[index];
if (d < 3 * output_feature_dim_)
gates_diff[index] = x_act * (1 - x_act) * gates_diff[index];
else
gates_diff[index] = (1 - x_act * x_act) * gates_diff[index];
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::copy_prev_gpu(int t, int count,
const Dtype *cont_t,
const Dtype *c_t, const Dtype *h_t,
Dtype *c_prev, Dtype *h_prev) {
if (t > 0) {
if (cont_t) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_copy_indicator<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, output_feature_dim_,
cont_t, c_t - count, c_prev);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_copy_indicator<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, output_feature_dim_,
cont_t, h_t - count, h_prev);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, c_t - count, c_prev);
caffe_copy(count, h_t - count, h_prev);
}
} else {
caffe_gpu_set(count, Dtype(0), c_prev);
caffe_gpu_set(count, Dtype(0), h_prev);
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype * x = bottom[0]->gpu_data();
const Dtype * cont = (bottom.size() > 1) ? bottom[1]->gpu_data() : NULL;
const Dtype * x_static = (bottom.size() > 2) ? bottom[2]->gpu_data() : NULL;
const int T = bottom[0]->shape(0);
const int batch = bottom[0]->shape(1);
const int count = batch * output_feature_dim_;
const int hidden_dim_ = NumOfGates * output_feature_dim_;
const Dtype * wx = this->blobs_[WX]->gpu_data();
const Dtype * ws = (x_static) ? this->blobs_[WS]->gpu_data() : NULL;
const Dtype * uh = this->blobs_[UH]->gpu_data();
const Dtype * b = this->blobs_[B]->gpu_data();
Dtype * c = cell_.mutable_gpu_data();
Dtype * gates = gates_.mutable_gpu_data();
Dtype * h = top[0]->mutable_gpu_data();
Dtype * c_prev = buffer_c_prev_.mutable_gpu_data();
Dtype * h_prev = buffer_h_prev_.mutable_gpu_data();
const Dtype * bias_multiplier = bias_multiplier_.gpu_data();
Dtype * x_static_ws = (x_static) ? x_static_ws_.mutable_gpu_data() : NULL;
caffe_gpu_gemm(CblasNoTrans, CblasTrans,
T * batch, hidden_dim_, input_feature_dim_,
Dtype(1), x, wx,
Dtype(0), gates);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
T * batch, hidden_dim_, 1,
Dtype(1), bias_multiplier, b,
Dtype(1), gates);
if (x_static) caffe_gpu_gemm(CblasNoTrans, CblasTrans,
batch, hidden_dim_, input_feature_dim_,
Dtype(1), x_static, ws,
Dtype(0), x_static_ws);
for (int t = 0; t < T; t++) {
const Dtype * cont_t = (cont ? cont + t * batch : NULL);
Dtype * gates_t = gates + t * count * NumOfGates;
Dtype * c_t = c + t * count;
Dtype * h_t = h + t * count;
if (x_static)
caffe_gpu_add(x_static_ws_.count(), x_static_ws, gates_t, gates_t);
copy_prev_gpu(t, count, cont_t, c_t, h_t, c_prev, h_prev);
caffe_gpu_gemm(CblasNoTrans, CblasTrans,
batch, hidden_dim_, output_feature_dim_,
Dtype(1), h_prev, uh,
Dtype(1), gates_t);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, output_feature_dim_,
gates_t, h_t, c_t, c_prev);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down.size() > 1) {
CHECK(!propagate_down[1]) << "Cannot back-propagate to continuous indicator.";
}
// clean blobs_[n]->gpu_diff()
for (int i = 0; i < NumOfBlobs; i++) {
caffe_gpu_set(this->blobs_[i]->count(), Dtype(0),
this->blobs_[i]->mutable_gpu_diff());
}
const Dtype * x_static = NULL, *ws = NULL;
Dtype * ws_diff = NULL, *x_static_diff = NULL;
if (bottom.size() > 2) {
ws = this->blobs_[WS]->gpu_data();
ws_diff = this->blobs_[WS]->mutable_gpu_diff();
x_static = bottom[2]->gpu_data();
if (propagate_down[2]) {
x_static_diff = bottom[2]->mutable_gpu_diff();
caffe_gpu_set(bottom[2]->count(), Dtype(0),
bottom[2]->mutable_gpu_diff());
}
}
const int T = bottom[0]->shape(0);
const int batch = bottom[0]->shape(1);
const int count = batch * output_feature_dim_;
const int hidden_dim_ = NumOfGates * output_feature_dim_;
// clean c_prev(and diff) & h_prev(and diff)
Dtype * c_diff[2];
c_diff[0] = buffer_c_diff_.mutable_gpu_data();
c_diff[1] = buffer_c_diff_.mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0), c_diff[0]);
caffe_gpu_set(count, Dtype(0), c_diff[1]);
Dtype * h_prev = buffer_h_prev_.mutable_gpu_data();
Dtype * h_backpropagate = buffer_h_prev_.mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0), h_backpropagate);
Dtype * c_prev = buffer_c_prev_.mutable_gpu_data();
// pointers
const Dtype * x = bottom[0]->gpu_data();
const Dtype * cont = (bottom.size() > 1) ? bottom[1]->gpu_data() : NULL;
const Dtype * h = top[0]->gpu_data();
const Dtype * c = cell_.gpu_data();
const Dtype * gates = gates_.gpu_data();
const Dtype * wx = this->blobs_[WX]->gpu_data();
const Dtype * uh = this->blobs_[UH]->gpu_data();
const Dtype * bias_multiplier = bias_multiplier_.gpu_data();
Dtype * h_diff = top[0]->mutable_gpu_diff();
Dtype * gates_diff = gates_.mutable_gpu_diff();
Dtype * wx_diff = this->blobs_[WX]->mutable_gpu_diff();
Dtype * uh_diff = this->blobs_[UH]->mutable_gpu_diff();
Dtype * b_diff = this->blobs_[B]->mutable_gpu_diff();
Dtype * x_diff = propagate_down[0] ? bottom[0]->mutable_gpu_diff() : NULL;
bool FLAG = true;
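// FLAG ping-pongs between the two halves of buffer_c_diff_: the cell diff written
// as c_backpropagate at step t is read back as c_t_diff at step t-1.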
// loop body
for (int t = T-1; t >= 0; t--) {
const Dtype * cont_t = cont ? cont + t * batch : NULL;
int offset = t * count;
const Dtype * h_t = h + offset;
const Dtype * c_t = c + offset;
const Dtype * gates_t = gates + offset * NumOfGates;
const Dtype * x_t = x + t * batch * input_feature_dim_;
Dtype * h_t_diff = h_diff + offset;
FLAG = !FLAG;
const Dtype * c_t_diff = c_diff[FLAG];
Dtype * c_backpropagate = c_diff[!FLAG];
Dtype * gates_t_diff = gates_diff + offset * NumOfGates;
// accumulate.
caffe_gpu_add(count, h_backpropagate, h_t_diff, h_t_diff);
copy_prev_gpu(t, count, cont_t, c_t, h_t, c_prev, h_prev);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, batch, output_feature_dim_,
gates_t, gates_t_diff,
c_t, c_t_diff,
c_prev, c_backpropagate,
h_t_diff);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_acts_backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count*NumOfGates)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count*NumOfGates, output_feature_dim_,
gates_t, gates_t_diff);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, input_feature_dim_, batch,
Dtype(1), gates_t_diff, x_t,
Dtype(1), wx_diff);
if (x_static) caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, input_feature_dim_, batch,
Dtype(1), gates_t_diff, x_static,
Dtype(1), ws_diff);
if (x_static_diff) caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch, input_feature_dim_, hidden_dim_,
Dtype(1), gates_t_diff, ws,
Dtype(1), x_static_diff);
caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, output_feature_dim_, batch,
Dtype(1), gates_t_diff, h_prev,
Dtype(1), uh_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch, output_feature_dim_, hidden_dim_,
Dtype(1), gates_t_diff, uh,
Dtype(0), h_backpropagate);
if (t > 0 && cont_t) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_copy_indicator<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, output_feature_dim_,
cont_t, h_backpropagate, h_backpropagate);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( lstm_copy_indicator<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, output_feature_dim_,
cont_t, c_backpropagate, c_backpropagate);
CUDA_POST_KERNEL_CHECK;
}
}
if (x_diff) caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch*T, input_feature_dim_, hidden_dim_,
Dtype(1), gates_diff, wx,
Dtype(0), x_diff);
caffe_gpu_gemv<Dtype>(CblasTrans, T * batch, hidden_dim_, 1,
gates_diff, bias_multiplier, Dtype(1), b_diff);
#ifdef USE_MPI
if (Caffe::getStrategy() == 0) {
hipDeviceSynchronize();
for (int i = 0; i < NumOfBlobs; i++) {
MPI_Allreduce(MPI_IN_PLACE, this->blobs_[i]->mutable_gpu_diff(),
this->blobs_[i]->count(), MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
}
} else if (Caffe::getStrategy() == 1) {
MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
for (int i = 0; i < NumOfBlobs; i++) {
this->blobs_[i]->mutable_cpu_diff();
}
hipDeviceSynchronize();
for (int i = 0; i < NumOfBlobs; i++) {
task_list->push_back(new MpiTask<Dtype>(NULL,0,this->blobs_[i].get(),1,this->blobs_[i]->count()));
}
} else {
}
#endif // USE_MPI
}
INSTANTIATE_LAYER_GPU_FUNCS(SLLSTMLayer);
}; // namespace caffe
| 7622ab01df6262ae2b0d82e68f78c350ce975272.cu | #include <cfloat>
#include <vector>
#include <math_functions.h>
#include "thrust/device_vector.h"
#include "caffe/common.hpp"
#include "caffe/sequence_layers.hpp"
#ifdef USE_MPI
#include "caffe/mpitask.hpp"
#endif
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void lstm_copy_indicator(const int count, const int output_feature_dim_,
const Dtype * cont_t, const Dtype * src, Dtype * dst) {
CUDA_KERNEL_LOOP(i, count) {
const int b = i / output_feature_dim_;
dst[i] = (cont_t[b] > 0) ? src[i] : Dtype(0);
}
}
template <typename Dtype>
__global__ void lstm_forward_kernel(const int count, const int output_feature_dim_,
Dtype * gates, Dtype * h, Dtype * c,
const Dtype * c_prev) {
CUDA_KERNEL_LOOP(index, count) {
const int index_batch = index / output_feature_dim_,
index_feature = index % output_feature_dim_;
const int offset = index_batch * SLLSTMLayer<Dtype>::NumOfGates
* output_feature_dim_ + index_feature;
const int fi = SLLSTMLayer<Dtype>::I * output_feature_dim_ + offset,
ff = SLLSTMLayer<Dtype>::F * output_feature_dim_ + offset,
fo = SLLSTMLayer<Dtype>::O * output_feature_dim_ + offset,
fg = SLLSTMLayer<Dtype>::G * output_feature_dim_ + offset;
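// Standard LSTM cell update: gates i, f, o are squashed with the logistic
// sigmoid, the candidate g with tanh, then c = i*g + f*c_prev and h = o*tanh(c).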
gates[fi] = sigmoid(gates[fi]);
gates[ff] = sigmoid(gates[ff]);
gates[fo] = sigmoid(gates[fo]);
gates[fg] = tanh(gates[fg]);
c[index] = gates[fi] * gates[fg] + gates[ff] * c_prev[index];
h[index] = gates[fo] * tanh(c[index]);
}
}
template <typename Dtype>
__global__ void lstm_backward_kernel(const int batch, const int output_feature_dim_,
const Dtype * gates, Dtype * gates_diff,
const Dtype * c, const Dtype * c_diff,
const Dtype * c_prev, Dtype * c_backpropagate,
const Dtype * h_diff) {
CUDA_KERNEL_LOOP(index, batch * output_feature_dim_) {
const int index_batch = index / output_feature_dim_,
index_feature = index % output_feature_dim_;
const int offset = index_batch * SLLSTMLayer<Dtype>::NumOfGates
* output_feature_dim_ + index_feature;
const int fi = SLLSTMLayer<Dtype>::I * output_feature_dim_ + offset,
ff = SLLSTMLayer<Dtype>::F * output_feature_dim_ + offset,
fo = SLLSTMLayer<Dtype>::O * output_feature_dim_ + offset,
fg = SLLSTMLayer<Dtype>::G * output_feature_dim_ + offset;
const Dtype tanhc = tanh(c[index]);
gates_diff[fo] = tanhc * h_diff[index];
Dtype c_term_diff = c_diff[index] + (1 - tanhc * tanhc)
* gates[fo] * h_diff[index];
gates_diff[ff] = c_prev[index] * c_term_diff;
c_backpropagate[index] = gates[ff] * c_term_diff;
gates_diff[fi] = gates[fg] * c_term_diff;
gates_diff[fg] = gates[fi] * c_term_diff;
}
}
template <typename Dtype>
__global__ void lstm_acts_backward(const int count, const int output_feature_dim_,
const Dtype * gates, Dtype * gates_diff){
const int x_dim = SLLSTMLayer<Dtype>::NumOfGates * output_feature_dim_;
CUDA_KERNEL_LOOP(index, count) {
const int d = index % x_dim;
const Dtype x_act = gates[index];
if (d < 3 * output_feature_dim_)
gates_diff[index] = x_act * (1 - x_act) * gates_diff[index];
else
gates_diff[index] = (1 - x_act * x_act) * gates_diff[index];
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::copy_prev_gpu(int t, int count,
const Dtype *cont_t,
const Dtype *c_t, const Dtype *h_t,
Dtype *c_prev, Dtype *h_prev) {
if (t > 0) {
if (cont_t) {
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_copy_indicator<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, output_feature_dim_,
cont_t, c_t - count, c_prev);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_copy_indicator<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, output_feature_dim_,
cont_t, h_t - count, h_prev);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, c_t - count, c_prev);
caffe_copy(count, h_t - count, h_prev);
}
} else {
caffe_gpu_set(count, Dtype(0), c_prev);
caffe_gpu_set(count, Dtype(0), h_prev);
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype * x = bottom[0]->gpu_data();
const Dtype * cont = (bottom.size() > 1) ? bottom[1]->gpu_data() : NULL;
const Dtype * x_static = (bottom.size() > 2) ? bottom[2]->gpu_data() : NULL;
const int T = bottom[0]->shape(0);
const int batch = bottom[0]->shape(1);
const int count = batch * output_feature_dim_;
const int hidden_dim_ = NumOfGates * output_feature_dim_;
const Dtype * wx = this->blobs_[WX]->gpu_data();
const Dtype * ws = (x_static) ? this->blobs_[WS]->gpu_data() : NULL;
const Dtype * uh = this->blobs_[UH]->gpu_data();
const Dtype * b = this->blobs_[B]->gpu_data();
Dtype * c = cell_.mutable_gpu_data();
Dtype * gates = gates_.mutable_gpu_data();
Dtype * h = top[0]->mutable_gpu_data();
Dtype * c_prev = buffer_c_prev_.mutable_gpu_data();
Dtype * h_prev = buffer_h_prev_.mutable_gpu_data();
const Dtype * bias_multiplier = bias_multiplier_.gpu_data();
Dtype * x_static_ws = (x_static) ? x_static_ws_.mutable_gpu_data() : NULL;
caffe_gpu_gemm(CblasNoTrans, CblasTrans,
T * batch, hidden_dim_, input_feature_dim_,
Dtype(1), x, wx,
Dtype(0), gates);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
T * batch, hidden_dim_, 1,
Dtype(1), bias_multiplier, b,
Dtype(1), gates);
if (x_static) caffe_gpu_gemm(CblasNoTrans, CblasTrans,
batch, hidden_dim_, input_feature_dim_,
Dtype(1), x_static, ws,
Dtype(0), x_static_ws);
for (int t = 0; t < T; t++) {
const Dtype * cont_t = (cont ? cont + t * batch : NULL);
Dtype * gates_t = gates + t * count * NumOfGates;
Dtype * c_t = c + t * count;
Dtype * h_t = h + t * count;
if (x_static)
caffe_gpu_add(x_static_ws_.count(), x_static_ws, gates_t, gates_t);
copy_prev_gpu(t, count, cont_t, c_t, h_t, c_prev, h_prev);
caffe_gpu_gemm(CblasNoTrans, CblasTrans,
batch, hidden_dim_, output_feature_dim_,
Dtype(1), h_prev, uh,
Dtype(1), gates_t);
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, output_feature_dim_,
gates_t, h_t, c_t, c_prev);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void SLLSTMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down.size() > 1) {
CHECK(!propagate_down[1]) << "Cannot back-propagate to continuous indicator.";
}
// clean blobs_[n]->gpu_diff()
for (int i = 0; i < NumOfBlobs; i++) {
caffe_gpu_set(this->blobs_[i]->count(), Dtype(0),
this->blobs_[i]->mutable_gpu_diff());
}
const Dtype * x_static = NULL, *ws = NULL;
Dtype * ws_diff = NULL, *x_static_diff = NULL;
if (bottom.size() > 2) {
ws = this->blobs_[WS]->gpu_data();
ws_diff = this->blobs_[WS]->mutable_gpu_diff();
x_static = bottom[2]->gpu_data();
if (propagate_down[2]) {
x_static_diff = bottom[2]->mutable_gpu_diff();
caffe_gpu_set(bottom[2]->count(), Dtype(0),
bottom[2]->mutable_gpu_diff());
}
}
const int T = bottom[0]->shape(0);
const int batch = bottom[0]->shape(1);
const int count = batch * output_feature_dim_;
const int hidden_dim_ = NumOfGates * output_feature_dim_;
// clean c_prev(and diff) & h_prev(and diff)
Dtype * c_diff[2];
c_diff[0] = buffer_c_diff_.mutable_gpu_data();
c_diff[1] = buffer_c_diff_.mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0), c_diff[0]);
caffe_gpu_set(count, Dtype(0), c_diff[1]);
Dtype * h_prev = buffer_h_prev_.mutable_gpu_data();
Dtype * h_backpropagate = buffer_h_prev_.mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0), h_backpropagate);
Dtype * c_prev = buffer_c_prev_.mutable_gpu_data();
// pointers
const Dtype * x = bottom[0]->gpu_data();
const Dtype * cont = (bottom.size() > 1) ? bottom[1]->gpu_data() : NULL;
const Dtype * h = top[0]->gpu_data();
const Dtype * c = cell_.gpu_data();
const Dtype * gates = gates_.gpu_data();
const Dtype * wx = this->blobs_[WX]->gpu_data();
const Dtype * uh = this->blobs_[UH]->gpu_data();
const Dtype * bias_multiplier = bias_multiplier_.gpu_data();
Dtype * h_diff = top[0]->mutable_gpu_diff();
Dtype * gates_diff = gates_.mutable_gpu_diff();
Dtype * wx_diff = this->blobs_[WX]->mutable_gpu_diff();
Dtype * uh_diff = this->blobs_[UH]->mutable_gpu_diff();
Dtype * b_diff = this->blobs_[B]->mutable_gpu_diff();
Dtype * x_diff = propagate_down[0] ? bottom[0]->mutable_gpu_diff() : NULL;
bool FLAG = true;
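// FLAG ping-pongs between the two halves of buffer_c_diff_: the cell diff written
// as c_backpropagate at step t is read back as c_t_diff at step t-1.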
// loop body
for (int t = T-1; t >= 0; t--) {
const Dtype * cont_t = cont ? cont + t * batch : NULL;
int offset = t * count;
const Dtype * h_t = h + offset;
const Dtype * c_t = c + offset;
const Dtype * gates_t = gates + offset * NumOfGates;
const Dtype * x_t = x + t * batch * input_feature_dim_;
Dtype * h_t_diff = h_diff + offset;
FLAG = !FLAG;
const Dtype * c_t_diff = c_diff[FLAG];
Dtype * c_backpropagate = c_diff[!FLAG];
Dtype * gates_t_diff = gates_diff + offset * NumOfGates;
// accumulate.
caffe_gpu_add(count, h_backpropagate, h_t_diff, h_t_diff);
copy_prev_gpu(t, count, cont_t, c_t, h_t, c_prev, h_prev);
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(batch, output_feature_dim_,
gates_t, gates_t_diff,
c_t, c_t_diff,
c_prev, c_backpropagate,
h_t_diff);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_acts_backward<Dtype><<<CAFFE_GET_BLOCKS(count*NumOfGates),
CAFFE_CUDA_NUM_THREADS>>>(count*NumOfGates, output_feature_dim_,
gates_t, gates_t_diff);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, input_feature_dim_, batch,
Dtype(1), gates_t_diff, x_t,
Dtype(1), wx_diff);
if (x_static) caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, input_feature_dim_, batch,
Dtype(1), gates_t_diff, x_static,
Dtype(1), ws_diff);
if (x_static_diff) caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch, input_feature_dim_, hidden_dim_,
Dtype(1), gates_t_diff, ws,
Dtype(1), x_static_diff);
caffe_gpu_gemm(CblasTrans, CblasNoTrans,
hidden_dim_, output_feature_dim_, batch,
Dtype(1), gates_t_diff, h_prev,
Dtype(1), uh_diff);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch, output_feature_dim_, hidden_dim_,
Dtype(1), gates_t_diff, uh,
Dtype(0), h_backpropagate);
if (t > 0 && cont_t) {
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_copy_indicator<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, output_feature_dim_,
cont_t, h_backpropagate, h_backpropagate);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
lstm_copy_indicator<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, output_feature_dim_,
cont_t, c_backpropagate, c_backpropagate);
CUDA_POST_KERNEL_CHECK;
}
}
if (x_diff) caffe_gpu_gemm(CblasNoTrans, CblasNoTrans,
batch*T, input_feature_dim_, hidden_dim_,
Dtype(1), gates_diff, wx,
Dtype(0), x_diff);
caffe_gpu_gemv<Dtype>(CblasTrans, T * batch, hidden_dim_, 1,
gates_diff, bias_multiplier, Dtype(1), b_diff);
#ifdef USE_MPI
if (Caffe::getStrategy() == 0) {
cudaDeviceSynchronize();
for (int i = 0; i < NumOfBlobs; i++) {
MPI_Allreduce(MPI_IN_PLACE, this->blobs_[i]->mutable_gpu_diff(),
this->blobs_[i]->count(), MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
}
} else if (Caffe::getStrategy() == 1) {
MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
for (int i = 0; i < NumOfBlobs; i++) {
this->blobs_[i]->mutable_cpu_diff();
}
cudaDeviceSynchronize();
for (int i = 0; i < NumOfBlobs; i++) {
task_list->push_back(new MpiTask<Dtype>(NULL,0,this->blobs_[i].get(),1,this->blobs_[i]->count()));
}
} else {
}
#endif // USE_MPI
}
INSTANTIATE_LAYER_GPU_FUNCS(SLLSTMLayer);
}; // namespace caffe
|
cc09a40a00361181f56e0fcb3a250a8d4dacc359.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x9(float** x10, float* x11) {
// this is cuda 2-section concat kernel for 3D inputs at axis 2.
// It concatenates 2 3D arrays on the innermost dimension (dim2).
// arg0: array of input input arrays
// arg1: output array
// call constraint: in.size = 2
// call constraint: sum of in(i).size = out.size for i in [0, 2)
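// worked example for the launch below (inner dims 3 and 5, so out dim2 = 8):
// x12 = 12 -> x13 = 12 % 8 = 4 >= 3, hence x11[12] = x10[1][12/8*5 + (4-3)] = x10[1][6]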
int x12 = blockIdx.x * blockDim.x + threadIdx.x;
if (x12 < 48) {
int x13 = x12 % 8;
if (x13 < 3) x11[x12] = x10[0][x12 / 8 * 3 + x13];
else x11[x12] = x10[1][x12 / 8 * 5 + (x13 - 3)];
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
float* x1 = (float*)malloc(18 * sizeof(float));
scan_floats("golden/concat2/input0.data", x1, 18);
float* x2 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x2, (size_t)(18 * sizeof(float))));
CUDA_CALL(hipMemcpy(x2, x1, (size_t)(18 * sizeof(float)), hipMemcpyHostToDevice));
float* x3 = (float*)malloc(30 * sizeof(float));
scan_floats("golden/concat2/input1.data", x3, 30);
float* x4 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x4, (size_t)(30 * sizeof(float))));
CUDA_CALL(hipMemcpy(x4, x3, (size_t)(30 * sizeof(float)), hipMemcpyHostToDevice));
float* x5 = (float*)malloc(48 * sizeof(float));
float* x6 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x6, (size_t)(48 * sizeof(float))));
float** x7 = (float**)malloc(2 * sizeof(float*));
x7[0] = x2;
x7[1] = x4;
float** x8 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(hipMalloc(&x8, (size_t)(2 * sizeof(float*))));
CUDA_CALL(hipMemcpy(x8, x7, (size_t)(2 * sizeof(float*)), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( x9), dim3(dim3(1, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x8, x6);
CUDA_CALL(hipMemcpy(x5, x6, (size_t)(48 * sizeof(float)), hipMemcpyDeviceToHost));
// check general cuda3DConcat kernel
check_float_array("golden/concat2/output.data", x5, 48);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
| cc09a40a00361181f56e0fcb3a250a8d4dacc359.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x9(float** x10, float* x11) {
// this is cuda 2-section concat kernel for 3D inputs at axis 2.
// It concatenates 2 3D arrays on the innermost dimension (dim2).
// arg0: array of input input arrays
// arg1: output array
// call constraint: in.size = 2
// call constraint: sum of in(i).size = out.size for i in [0, 2)
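// worked example for the launch below (inner dims 3 and 5, so out dim2 = 8):
// x12 = 12 -> x13 = 12 % 8 = 4 >= 3, hence x11[12] = x10[1][12/8*5 + (4-3)] = x10[1][6]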
int x12 = blockIdx.x * blockDim.x + threadIdx.x;
if (x12 < 48) {
int x13 = x12 % 8;
if (x13 < 3) x11[x12] = x10[0][x12 / 8 * 3 + x13];
else x11[x12] = x10[1][x12 / 8 * 5 + (x13 - 3)];
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
float* x1 = (float*)malloc(18 * sizeof(float));
scan_floats("golden/concat2/input0.data", x1, 18);
float* x2 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x2, (size_t)(18 * sizeof(float))));
CUDA_CALL(cudaMemcpy(x2, x1, (size_t)(18 * sizeof(float)), cudaMemcpyHostToDevice));
float* x3 = (float*)malloc(30 * sizeof(float));
scan_floats("golden/concat2/input1.data", x3, 30);
float* x4 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x4, (size_t)(30 * sizeof(float))));
CUDA_CALL(cudaMemcpy(x4, x3, (size_t)(30 * sizeof(float)), cudaMemcpyHostToDevice));
float* x5 = (float*)malloc(48 * sizeof(float));
float* x6 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x6, (size_t)(48 * sizeof(float))));
float** x7 = (float**)malloc(2 * sizeof(float*));
x7[0] = x2;
x7[1] = x4;
float** x8 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(cudaMalloc(&x8, (size_t)(2 * sizeof(float*))));
CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(2 * sizeof(float*)), cudaMemcpyHostToDevice));
x9<<<dim3(1, 1, 1), dim3(512, 1, 1)>>>(x8, x6);
CUDA_CALL(cudaMemcpy(x5, x6, (size_t)(48 * sizeof(float)), cudaMemcpyDeviceToHost));
// check general cuda3DConcat kernel
check_float_array("golden/concat2/output.data", x5, 48);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
|
77d6bbb70625d436da332ead93076cf7c67e18e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define HISTOGRAM_SIZE 64
#define TILE_WITDH 16
void check_cuda(hipError_t error, const char *filename, const int line)
{
if (error != hipSuccess) {
fprintf(stderr, "Error: %s:%d: %s: %s\n", filename, line,
hipGetErrorName(error), hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
#define CUDACHECK(cmd) check_cuda(cmd, __FILE__, __LINE__)
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *)malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void histogram_kernel(PPMImage *d_image, float* hist) {
__shared__ float private_hist[HISTOGRAM_SIZE];
float size = d_image->y*d_image->x*1.0;
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) private_hist[threadIdx.x * TILE_WITDH + threadIdx.y] = 0;
__syncthreads();
// Get variable values
int col = blockDim.x * blockIdx.x + threadIdx.x,
row = blockDim.y * blockIdx.y + threadIdx.y,
index = row * d_image->x + col;
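// Each channel was quantised on the host to 2 bits (floor(c*4/256), values 0-3),
// so 16*red + 4*green + blue selects one of the 64 bins; dividing by the pixel
// count when flushing the private histogram yields relative frequencies.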
if((row < d_image->y && col < d_image->x) && (index < d_image->x*d_image->y)) {
atomicAdd(&(private_hist[16*d_image->data[index].red + 4 * d_image->data[index].green + d_image->data[index].blue]), 1);
}
__syncthreads();
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) {
atomicAdd(&(hist[threadIdx.x * TILE_WITDH + threadIdx.y]), (private_hist[threadIdx.x * TILE_WITDH + threadIdx.y]/size));
}
}
double Histogram(PPMImage *image, float *h_h) {
float ms;
hipEvent_t start, stop;
int i;
unsigned int rows, cols, img_size;
PPMImage *d_image;
PPMPixel *d_pixels;
float *d_hist;
// Create Events
CUDACHECK(hipEventCreate(&start));
CUDACHECK(hipEventCreate(&stop));
// Get image data
cols = image->x;
rows = image->y;
img_size = cols * rows;
//Process every image data
for (i = 0; i < img_size; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
hipMalloc((void **)&d_image, sizeof(PPMImage));
hipMalloc((void **)&d_pixels, sizeof(PPMPixel) * img_size);
hipMalloc((void **)&d_hist, HISTOGRAM_SIZE*sizeof(float));
hipMemcpy(d_image, image, sizeof(PPMImage), hipMemcpyHostToDevice);
hipMemcpy(d_pixels, image->data, sizeof(PPMPixel) * img_size, hipMemcpyHostToDevice);
hipMemcpy(&(d_image->data), &d_pixels, sizeof(PPMPixel *), hipMemcpyHostToDevice);
hipMemcpy(d_hist, h_h, HISTOGRAM_SIZE*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(ceil((float)cols / TILE_WITDH), ceil((float)rows / TILE_WITDH), 1);
dim3 dimBlock(TILE_WITDH, TILE_WITDH, 1);
// Launch kernel and compute kernel runtime.
// Warning: make sure only the kernel is being profiled, memcpies should be
// out of this region.
CUDACHECK(hipEventRecord(start));
hipLaunchKernelGGL(( histogram_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_image, d_hist);
CUDACHECK(hipEventRecord(stop));
CUDACHECK(hipEventSynchronize(stop));
CUDACHECK(hipEventElapsedTime(&ms, start, stop));
hipMemcpy(h_h, d_hist, HISTOGRAM_SIZE*sizeof(float), hipMemcpyDeviceToHost);
// Destroy events
CUDACHECK(hipEventDestroy(start));
CUDACHECK(hipEventDestroy(stop));
hipFree(d_image);
hipFree(d_pixels);
hipFree(d_hist);
return ((double)ms) / 1000.0;
}
int main(int argc, char *argv[]) {
if (argc < 2) {
fprintf(stderr, "Error: missing path to input file\n");
return 1;
}
PPMImage *image = readPPM(argv[1]);
float *h = (float *)malloc(sizeof(float) * 64);
// Initialize histogram
for (int i = 0; i < 64; i++)
h[i] = 0.0;
// Compute histogram
double t = Histogram(image, h);
for (int i = 0; i < 64; i++)
printf("%0.3f ", h[i]);
printf("\n");
fprintf(stderr, "%lf\n", t);
free(h);
}
| 77d6bbb70625d436da332ead93076cf7c67e18e6.cu | #include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define HISTOGRAM_SIZE 64
#define TILE_WITDH 16
void check_cuda(cudaError_t error, const char *filename, const int line)
{
if (error != cudaSuccess) {
fprintf(stderr, "Error: %s:%d: %s: %s\n", filename, line,
cudaGetErrorName(error), cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
#define CUDACHECK(cmd) check_cuda(cmd, __FILE__, __LINE__)
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *)malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void histogram_kernel(PPMImage *d_image, float* hist) {
__shared__ float private_hist[HISTOGRAM_SIZE];
float size = d_image->y*d_image->x*1.0;
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) private_hist[threadIdx.x * TILE_WITDH + threadIdx.y] = 0;
__syncthreads();
// Get variable values
int col = blockDim.x * blockIdx.x + threadIdx.x,
row = blockDim.y * blockIdx.y + threadIdx.y,
index = row * d_image->x + col;
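// Each channel was quantised on the host to 2 bits (floor(c*4/256), values 0-3),
// so 16*red + 4*green + blue selects one of the 64 bins; dividing by the pixel
// count when flushing the private histogram yields relative frequencies.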
if((row < d_image->y && col < d_image->x) && (index < d_image->x*d_image->y)) {
atomicAdd(&(private_hist[16*d_image->data[index].red + 4 * d_image->data[index].green + d_image->data[index].blue]), 1);
}
__syncthreads();
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) {
atomicAdd(&(hist[threadIdx.x * TILE_WITDH + threadIdx.y]), (private_hist[threadIdx.x * TILE_WITDH + threadIdx.y]/size));
}
}
double Histogram(PPMImage *image, float *h_h) {
float ms;
cudaEvent_t start, stop;
int i;
unsigned int rows, cols, img_size;
PPMImage *d_image;
PPMPixel *d_pixels;
float *d_hist;
// Create Events
CUDACHECK(cudaEventCreate(&start));
CUDACHECK(cudaEventCreate(&stop));
// Get image data
cols = image->x;
rows = image->y;
img_size = cols * rows;
//Process every image data
for (i = 0; i < img_size; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
cudaMalloc((void **)&d_image, sizeof(PPMImage));
cudaMalloc((void **)&d_pixels, sizeof(PPMPixel) * img_size);
cudaMalloc((void **)&d_hist, HISTOGRAM_SIZE*sizeof(float));
cudaMemcpy(d_image, image, sizeof(PPMImage), cudaMemcpyHostToDevice);
cudaMemcpy(d_pixels, image->data, sizeof(PPMPixel) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_image->data), &d_pixels, sizeof(PPMPixel *), cudaMemcpyHostToDevice);
cudaMemcpy(d_hist, h_h, HISTOGRAM_SIZE*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil((float)cols / TILE_WITDH), ceil((float)rows / TILE_WITDH), 1);
dim3 dimBlock(TILE_WITDH, TILE_WITDH, 1);
// Launch kernel and compute kernel runtime.
// Warning: make sure only the kernel is being profiled, memcpies should be
// out of this region.
CUDACHECK(cudaEventRecord(start));
histogram_kernel<<<dimGrid, dimBlock>>>(d_image, d_hist);
CUDACHECK(cudaEventRecord(stop));
CUDACHECK(cudaEventSynchronize(stop));
CUDACHECK(cudaEventElapsedTime(&ms, start, stop));
cudaMemcpy(h_h, d_hist, HISTOGRAM_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// Destroy events
CUDACHECK(cudaEventDestroy(start));
CUDACHECK(cudaEventDestroy(stop));
cudaFree(d_image);
cudaFree(d_pixels);
cudaFree(d_hist);
return ((double)ms) / 1000.0;
}
int main(int argc, char *argv[]) {
if (argc < 2) {
fprintf(stderr, "Error: missing path to input file\n");
return 1;
}
PPMImage *image = readPPM(argv[1]);
float *h = (float *)malloc(sizeof(float) * 64);
// Initialize histogram
for (int i = 0; i < 64; i++)
h[i] = 0.0;
// Compute histogram
double t = Histogram(image, h);
for (int i = 0; i < 64; i++)
printf("%0.3f ", h[i]);
printf("\n");
fprintf(stderr, "%lf\n", t);
free(h);
}
|
dc88bd66e9a7b48378b89cdc37d9ea12f1527046.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#else
#include <optix.h>
#include <hip/hip_runtime.h>
#endif
#include <OSL/device_string.h>
#include <OSL/oslclosure.h>
#include "rend_lib.h"
#include "util.h"
#if (OPTIX_VERSION < 70000)
// Ray payload
rtDeclareVariable (PRD_radiance, prd_radiance, rtPayload, );
// ray/hit variables
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal,);
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (int, obj_id, attribute obj_id, );
rtDeclareVariable (int, lgt_idx, attribute lgt_idx, );
// ray/hit variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable (float, t_hit, rtIntersectionDistance, );
// Buffers
rtBuffer<float3,2> output_buffer;
// Function pointers for the OSL shader
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_group_func, ,);
RT_PROGRAM void any_hit_shadow()
{
rtTerminateRay();
}
static __device__
void globals_from_hit(ShaderGlobals& sg)
{
// Setup the ShaderGlobals
sg.I = ray.direction;
sg.N = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, shading_normal));
sg.Ng = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, geometric_normal));
sg.P = ray.origin + t_hit * ray.direction;
sg.dPdu = dPdu;
sg.u = texcoord.x;
sg.v = texcoord.y;
sg.Ci = NULL;
sg.surfacearea = surface_area;
sg.backfacing = (dot(sg.N, sg.I) > 0.0f);
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure(const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3(result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3(1.0f);
const void* cur = closure_tree;
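// ADD nodes push one child plus the current weight onto the stack and descend into
// the other child; MUL nodes fold their weight into the running weight; leaf BSDF
// components contribute w * weight to the accumulated result.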
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
#if 0
if (launch_index.x == launch_dim.x / 2 && launch_index.y == launch_dim.y / 2)
printf ("microfacet, dist: %s\n", HDSTR(dist_str).c_str());
#endif
if (HDSTR(dist_str) == OSL::DeviceStrings::default_)
return make_float3(0.0f, 1.0f, 1.0f);
return make_float3(1.0f, 0.0f, 1.0f);
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
RT_PROGRAM void closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
prd_radiance.result = process_closure ((OSL::ClosureColor*) sg.Ci);
}
#else //#if (OPTIX_VERSION < 70000)
#include "../render_params.h"
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern"C" __global__ void __anyhit__any_hit_shadow ()
{
optixTerminateRay();
}
static __device__
void globals_from_hit (ShaderGlobals& sg)
{
const GenericRecord *record = reinterpret_cast<GenericRecord *> (optixGetSbtDataPointer());
ShaderGlobals local_sg;
// hit-kind 0: quad hit
// 1: sphere hit
optixDirectCall<void, unsigned int, float, float3, float3, ShaderGlobals *>(
optixGetHitKind(),
optixGetPrimitiveIndex(),
optixGetRayTmax(),
optixGetWorldRayOrigin(),
optixGetWorldRayDirection(),
&local_sg);
// Setup the ShaderGlobals
const float3 ray_direction = optixGetWorldRayDirection();
const float3 ray_origin = optixGetWorldRayOrigin();
const float t_hit = optixGetRayTmin();
sg.I = ray_direction;
sg.N = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.N));
sg.Ng = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.Ng));
sg.P = ray_origin + t_hit * ray_direction;
sg.dPdu = local_sg.dPdu;
sg.dPdv = local_sg.dPdv;
sg.u = local_sg.u;
sg.v = local_sg.v;
sg.Ci = NULL;
sg.surfacearea = local_sg.surfacearea;
sg.backfacing = dot (sg.N, sg.I) > 0.0f;
sg.shaderID = local_sg.shaderID;
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure (const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3 (result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3 (1.0f);
const void* cur = closure_tree;
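// ADD nodes push one child plus the current weight onto the stack and descend into
// the other child; MUL nodes fold their weight into the running weight; leaf BSDF
// components contribute w * weight to the accumulated result.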
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
if (HDSTR(dist_str) == STRING_PARAMS(default))
return make_float3(0.0f, 1.0f, 1.0f);
else
return make_float3(1.0f, 0.0f, 1.0f);
break;
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
extern "C" __global__ void __closesthit__closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
const unsigned int shaderInitOpIdx = 2u + 2u * sg.shaderID + 0u;
const unsigned int shaderGroupIdx = 2u + 2u * sg.shaderID + 1u;
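    // The index math above appears to assume an SBT direct-callable layout with two
    // leading geometry callables (the quad/sphere programs invoked from
    // globals_from_hit) followed by an (init, group) pair per shader, e.g.:
    //   [0] quad hit, [1] sphere hit,
    //   [2] shader0 init, [3] shader0 group, [4] shader1 init, [5] shader1 group
    // That layout is inferred from this indexing, not spelled out in this file.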
optixDirectCall<void, ShaderGlobals*, void *>(shaderInitOpIdx, &sg, params); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *>(shaderGroupIdx , &sg, params); // call osl_group_func
float3 result = process_closure ((OSL::ClosureColor*) sg.Ci);
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = make_float3(result.x, result.y, result.z);
}
#endif //#if (OPTIX_VERSION < 70000)
| dc88bd66e9a7b48378b89cdc37d9ea12f1527046.cu | // Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#else
#include <optix.h>
#include <cuda_runtime.h>
#endif
#include <OSL/device_string.h>
#include <OSL/oslclosure.h>
#include "rend_lib.h"
#include "util.h"
#if (OPTIX_VERSION < 70000)
// Ray payload
rtDeclareVariable (PRD_radiance, prd_radiance, rtPayload, );
// ray/hit variables
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal,);
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (int, obj_id, attribute obj_id, );
rtDeclareVariable (int, lgt_idx, attribute lgt_idx, );
// ray/hit variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable (float, t_hit, rtIntersectionDistance, );
// Buffers
rtBuffer<float3,2> output_buffer;
// Function pointers for the OSL shader
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*)>, osl_group_func, ,);
RT_PROGRAM void any_hit_shadow()
{
rtTerminateRay();
}
static __device__
void globals_from_hit(ShaderGlobals& sg)
{
// Setup the ShaderGlobals
sg.I = ray.direction;
sg.N = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, shading_normal));
sg.Ng = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, geometric_normal));
sg.P = ray.origin + t_hit * ray.direction;
sg.dPdu = dPdu;
sg.u = texcoord.x;
sg.v = texcoord.y;
sg.Ci = NULL;
sg.surfacearea = surface_area;
sg.backfacing = (dot(sg.N, sg.I) > 0.0f);
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure(const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3(result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3(1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
#if 0
if (launch_index.x == launch_dim.x / 2 && launch_index.y == launch_dim.y / 2)
printf ("microfacet, dist: %s\n", HDSTR(dist_str).c_str());
#endif
if (HDSTR(dist_str) == OSL::DeviceStrings::default_)
return make_float3(0.0f, 1.0f, 1.0f);
return make_float3(1.0f, 0.0f, 1.0f);
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
RT_PROGRAM void closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
prd_radiance.result = process_closure ((OSL::ClosureColor*) sg.Ci);
}
#else //#if (OPTIX_VERSION < 70000)
#include "../render_params.h"
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern "C" __global__ void __anyhit__any_hit_shadow ()
{
optixTerminateRay();
}
static __device__
void globals_from_hit (ShaderGlobals& sg)
{
const GenericRecord *record = reinterpret_cast<GenericRecord *> (optixGetSbtDataPointer());
ShaderGlobals local_sg;
// hit-kind 0: quad hit
// 1: sphere hit
optixDirectCall<void, unsigned int, float, float3, float3, ShaderGlobals *>(
optixGetHitKind(),
optixGetPrimitiveIndex(),
optixGetRayTmax(),
optixGetWorldRayOrigin(),
optixGetWorldRayDirection(),
&local_sg);
// Setup the ShaderGlobals
const float3 ray_direction = optixGetWorldRayDirection();
const float3 ray_origin = optixGetWorldRayOrigin();
const float t_hit = optixGetRayTmin();
sg.I = ray_direction;
sg.N = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.N));
sg.Ng = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.Ng));
sg.P = ray_origin + t_hit * ray_direction;
sg.dPdu = local_sg.dPdu;
sg.dPdv = local_sg.dPdv;
sg.u = local_sg.u;
sg.v = local_sg.v;
sg.Ci = NULL;
sg.surfacearea = local_sg.surfacearea;
sg.backfacing = dot (sg.N, sg.I) > 0.0f;
sg.shaderID = local_sg.shaderID;
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure (const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3 (result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3 (1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
if (HDSTR(dist_str) == STRING_PARAMS(default))
return make_float3(0.0f, 1.0f, 1.0f);
else
return make_float3(1.0f, 0.0f, 1.0f);
break;
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
extern "C" __global__ void __closesthit__closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
const unsigned int shaderInitOpIdx = 2u + 2u * sg.shaderID + 0u;
const unsigned int shaderGroupIdx = 2u + 2u * sg.shaderID + 1u;
optixDirectCall<void, ShaderGlobals*, void *>(shaderInitOpIdx, &sg, params); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *>(shaderGroupIdx , &sg, params); // call osl_group_func
float3 result = process_closure ((OSL::ClosureColor*) sg.Ci);
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = make_float3(result.x, result.y, result.z);
}
#endif //#if (OPTIX_VERSION < 70000)
|
46876a9cd6c64f7973c389d26a537bfa533edb4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is machine problem 1, part 1, shift cypher
*
* The problem is to take in a string of unsigned ints and an int,
* the shift amount, and add the number to each element of
* the string, effectively "shifting" each element in the
* string.
* SUBMISSION GUIDELINES:
* You should copy the complete shift_cyper function from your solution
* into a file called mp1-part1-solution-kernel.cu and submit that file.
* The function needs to have exactly the same interface (including __global__)
* as the empty shift_cypher function given below.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include "mp1-util.h"
// Repeating from the tutorial, just in case you haven't looked at it.
// "kernels" or __global__ functions are the entry points to code that executes on the GPU
// The keyword __global__ indicates to the compiler that this function is a GPU entry point.
// __global__ functions must return void, and may only be called or "launched" from code that
// executes on the CPU.
void host_shift_cypher(unsigned int *input_array, unsigned int *output_array, unsigned int shift_amount, unsigned int alphabet_max, unsigned int array_length)
{
for(unsigned int i=0;i<array_length;i++)
{
int element = input_array[i];
int shifted = element + shift_amount;
if(shifted > alphabet_max)
{
shifted = shifted % (alphabet_max + 1);
}
output_array[i] = shifted;
}
}
// This kernel implements a per element shift
__global__ void shift_cypher(unsigned int *input_array, unsigned int *output_array, unsigned int shift_amount, unsigned int alphabet_max, unsigned int array_length)
{
// TODO your code here
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
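  // Note: there is no `i < array_length` bounds guard here; the launch in main()
  // relies on num_elements (1 << 24) being an exact multiple of the block size,
  // so the grid covers the array exactly.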
int element = input_array[i];
int shifted = element+shift_amount;
if (shifted > alphabet_max) {
shifted = shifted % (alphabet_max + 1);
}
output_array[i] = shifted;
}
int main(void)
{
// initialize
srand(time(NULL));
// create arrays of 16M elements
int num_elements = 1 << 24;
unsigned int alphabet_max = 45647;
// compute the size of the arrays in bytes
int num_bytes = num_elements * sizeof(unsigned int);
// pointers to host & device arrays
unsigned int *host_input_array = 0;
unsigned int *host_output_array = 0;
unsigned int *host_output_checker_array = 0;
unsigned int *device_input_array = 0;
unsigned int *device_output_array = 0;
event_pair timer;
// malloc host arrays
host_input_array = (unsigned int*)malloc(num_bytes);
host_output_array = (unsigned int*)malloc(num_bytes);
host_output_checker_array = (unsigned int*)malloc(num_bytes);
// hipMalloc device arrays
hipMalloc((void**)&device_input_array, num_bytes);
hipMalloc((void**)&device_output_array, num_bytes);
// if either memory allocation failed, report an error message
if(host_input_array == 0 || host_output_array == 0 || host_output_checker_array == 0 ||
device_input_array == 0 || device_output_array == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// generate random input string
unsigned int shift_amount = rand();
for(int i=0;i< num_elements;i++)
{
host_input_array[i] = (unsigned int)rand();
}
// do copies to and from gpu once to get rid of timing weirdness
// on first time accesses due to driver
hipMemcpy(device_input_array, host_input_array, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(host_output_array, device_output_array, num_bytes, hipMemcpyDeviceToHost);
start_timer(&timer);
// copy input to GPU
hipMemcpy(device_input_array, host_input_array, num_bytes, hipMemcpyHostToDevice);
check_launch("copy to gpu");
stop_timer(&timer,"copy to gpu");
// choose a number of threads per block
// we use 512 threads here
int block_size = 512;
int grid_size = (num_elements + block_size - 1) / block_size;
start_timer(&timer);
// launch kernel
hipLaunchKernelGGL(( shift_cypher), dim3(grid_size),dim3(block_size), 0, 0, device_input_array, device_output_array, shift_amount, alphabet_max, num_elements);
check_launch("gpu shift cypher");
stop_timer(&timer,"gpu shift cypher");
start_timer(&timer);
// download and inspect the result on the host:
hipMemcpy(host_output_array, device_output_array, num_bytes, hipMemcpyDeviceToHost);
check_launch("copy from gpu");
stop_timer(&timer,"copy from gpu");
start_timer(&timer);
// generate reference output
host_shift_cypher(host_input_array, host_output_checker_array, shift_amount, alphabet_max, num_elements);
stop_timer(&timer,"host shift cypher");
// check CUDA output versus reference output
int error = 0;
for(int i=0;i<num_elements;i++)
{
if(host_output_array[i] != host_output_checker_array[i])
{
error = 1;
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else
{
printf("Worked! CUDA and reference output match. \n");
}
// deallocate memory
free(host_input_array);
free(host_output_array);
free(host_output_checker_array);
hipFree(device_input_array);
hipFree(device_output_array);
}
| 46876a9cd6c64f7973c389d26a537bfa533edb4b.cu | /* This is machine problem 1, part 1, shift cypher
*
* The problem is to take in a string of unsigned ints and an int,
* the shift amount, and add the number to each element of
* the string, effectively "shifting" each element in the
* string.
* SUBMISSION GUIDELINES:
* You should copy the complete shift_cyper function from your solution
* into a file called mp1-part1-solution-kernel.cu and submit that file.
* The function needs to have exactly the same interface (including __global__)
* as the empty shift_cypher function given below.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include "mp1-util.h"
// Repeating from the tutorial, just in case you haven't looked at it.
// "kernels" or __global__ functions are the entry points to code that executes on the GPU
// The keyword __global__ indicates to the compiler that this function is a GPU entry point.
// __global__ functions must return void, and may only be called or "launched" from code that
// executes on the CPU.
void host_shift_cypher(unsigned int *input_array, unsigned int *output_array, unsigned int shift_amount, unsigned int alphabet_max, unsigned int array_length)
{
for(unsigned int i=0;i<array_length;i++)
{
int element = input_array[i];
int shifted = element + shift_amount;
if(shifted > alphabet_max)
{
shifted = shifted % (alphabet_max + 1);
}
output_array[i] = shifted;
}
}
// This kernel implements a per element shift
__global__ void shift_cypher(unsigned int *input_array, unsigned int *output_array, unsigned int shift_amount, unsigned int alphabet_max, unsigned int array_length)
{
// TODO your code here
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
int element = input_array[i];
int shifted = element+shift_amount;
if (shifted > alphabet_max) {
shifted = shifted % (alphabet_max + 1);
}
output_array[i] = shifted;
}
int main(void)
{
// initialize
srand(time(NULL));
// create arrays of 16M elements
int num_elements = 1 << 24;
unsigned int alphabet_max = 45647;
// compute the size of the arrays in bytes
int num_bytes = num_elements * sizeof(unsigned int);
// pointers to host & device arrays
unsigned int *host_input_array = 0;
unsigned int *host_output_array = 0;
unsigned int *host_output_checker_array = 0;
unsigned int *device_input_array = 0;
unsigned int *device_output_array = 0;
event_pair timer;
// malloc host arrays
host_input_array = (unsigned int*)malloc(num_bytes);
host_output_array = (unsigned int*)malloc(num_bytes);
host_output_checker_array = (unsigned int*)malloc(num_bytes);
// cudaMalloc device arrays
cudaMalloc((void**)&device_input_array, num_bytes);
cudaMalloc((void**)&device_output_array, num_bytes);
// if either memory allocation failed, report an error message
if(host_input_array == 0 || host_output_array == 0 || host_output_checker_array == 0 ||
device_input_array == 0 || device_output_array == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// generate random input string
unsigned int shift_amount = rand();
for(int i=0;i< num_elements;i++)
{
host_input_array[i] = (unsigned int)rand();
}
// do copies to and from gpu once to get rid of timing weirdness
// on first time accesses due to driver
cudaMemcpy(device_input_array, host_input_array, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(host_output_array, device_output_array, num_bytes, cudaMemcpyDeviceToHost);
start_timer(&timer);
// copy input to GPU
cudaMemcpy(device_input_array, host_input_array, num_bytes, cudaMemcpyHostToDevice);
check_launch("copy to gpu");
stop_timer(&timer,"copy to gpu");
// choose a number of threads per block
// we use 512 threads here
int block_size = 512;
int grid_size = (num_elements + block_size - 1) / block_size;
start_timer(&timer);
// launch kernel
shift_cypher<<<grid_size,block_size>>>(device_input_array, device_output_array, shift_amount, alphabet_max, num_elements);
check_launch("gpu shift cypher");
stop_timer(&timer,"gpu shift cypher");
start_timer(&timer);
// download and inspect the result on the host:
cudaMemcpy(host_output_array, device_output_array, num_bytes, cudaMemcpyDeviceToHost);
check_launch("copy from gpu");
stop_timer(&timer,"copy from gpu");
start_timer(&timer);
// generate reference output
host_shift_cypher(host_input_array, host_output_checker_array, shift_amount, alphabet_max, num_elements);
stop_timer(&timer,"host shift cypher");
// check CUDA output versus reference output
int error = 0;
for(int i=0;i<num_elements;i++)
{
if(host_output_array[i] != host_output_checker_array[i])
{
error = 1;
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else
{
printf("Worked! CUDA and reference output match. \n");
}
// deallocate memory
free(host_input_array);
free(host_output_array);
free(host_output_checker_array);
cudaFree(device_input_array);
cudaFree(device_output_array);
}
|
d4ec98801305b9a48c306676eed1319bffbdc14f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -std=c++14 -fsyntax-only -verify %s
#include "Inputs/cuda.h"
template <typename T>
__global__ T foo() {
// expected-note@-1 {{kernel function type 'T ()' must have void return type}}
}
void f0() {
hipLaunchKernelGGL(( foo<void>), dim3(0), dim3(0), 0, 0, );
hipLaunchKernelGGL(( foo<int>), dim3(0), dim3(0), 0, 0, );
// expected-error@-1 {{no matching function for call to 'foo'}}
}
__global__ auto f1() {
}
__global__ auto f2(int x) {
return x + 1;
// expected-error@-2 {{kernel function type 'auto (int)' must have void return type}}
}
template <bool Cond, typename T = void> struct enable_if { typedef T type; };
template <typename T> struct enable_if<false, T> {};
template <int N>
__global__
auto bar() -> typename enable_if<N == 1>::type {
// expected-note@-1 {{requirement '3 == 1' was not satisfied [with N = 3]}}
}
template <int N>
__global__
auto bar() -> typename enable_if<N == 2>::type {
// expected-note@-1 {{requirement '3 == 2' was not satisfied [with N = 3]}}
}
void f3() {
hipLaunchKernelGGL(( bar<1>), dim3(0), dim3(0), 0, 0, );
hipLaunchKernelGGL(( bar<2>), dim3(0), dim3(0), 0, 0, );
hipLaunchKernelGGL(( bar<3>), dim3(0), dim3(0), 0, 0, );
// expected-error@-1 {{no matching function for call to 'bar'}}
}
| d4ec98801305b9a48c306676eed1319bffbdc14f.cu | // RUN: %clang_cc1 -std=c++14 -fsyntax-only -verify %s
#include "Inputs/cuda.h"
template <typename T>
__global__ T foo() {
// expected-note@-1 {{kernel function type 'T ()' must have void return type}}
}
void f0() {
foo<void><<<0, 0>>>();
foo<int><<<0, 0>>>();
// expected-error@-1 {{no matching function for call to 'foo'}}
}
__global__ auto f1() {
}
__global__ auto f2(int x) {
return x + 1;
// expected-error@-2 {{kernel function type 'auto (int)' must have void return type}}
}
template <bool Cond, typename T = void> struct enable_if { typedef T type; };
template <typename T> struct enable_if<false, T> {};
template <int N>
__global__
auto bar() -> typename enable_if<N == 1>::type {
// expected-note@-1 {{requirement '3 == 1' was not satisfied [with N = 3]}}
}
template <int N>
__global__
auto bar() -> typename enable_if<N == 2>::type {
// expected-note@-1 {{requirement '3 == 2' was not satisfied [with N = 3]}}
}
void f3() {
bar<1><<<0, 0>>>();
bar<2><<<0, 0>>>();
bar<3><<<0, 0>>>();
// expected-error@-1 {{no matching function for call to 'bar'}}
}
|
2fe24b80d98689d33ba0e9903df4dd4499f4c345.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "commons_gpu.cuh"
#include "definitions.hpp"
#include "kernels_hip.cuh"
__global__ void k_setup_rng(const uvec2 dims,
hiprandState_t *const __restrict__ globalRandState,
const uint seed) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS,
xMax = dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
if (NH_RADIUS < x && x < xMax)
hiprand_init(seed, idx, 0, &globalRandState[idx]);
}
}
__global__ void k_init_grid(GridType *const grid, const uvec2 dims,
hiprandState_t *const __restrict__ globalRandState,
const float spawnProbability) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols OR max id given radius
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS,
xMax = dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
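        // Branchless write: the two comparisons evaluate to 0 or 1, so cells in the
        // left/right safety border are forced to 0 and interior cells are seeded
        // with probability spawnProbability.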
grid[idx] = (NH_RADIUS < x) * (x < xMax) *
(hiprand_uniform(&globalRandState[idx]) < spawnProbability);
}
}
__global__ void k_reset_grid_buffers(fvec2s *const __restrict__ gridVertices,
const uvec2 numVertices) {
const uint stride = gridDim.x * blockDim.x,
idxMax = numVertices.x * numVertices.y;
for (uint idx = blockDim.x * blockIdx.x + threadIdx.x; idx < idxMax;
idx += stride)
gridVertices[idx].state = 0;
}
__global__ void
k_update_grid_buffers(const GridType *const grid, const uvec2 dims,
fvec2s *const __restrict__ gridVertices,
const uint numVerticesX, const uvec2 cellDensity,
const ulim2 gridLimX, const ulim2 gridLimY) {
// idxMin = thread ID + render margin
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
max(gridLimY.start * dims.x + gridLimX.start,
(dims.x * NH_RADIUS) + NH_RADIUS),
idxMax = min((gridLimY.end - 1) * dims.x + gridLimX.end,
(dims.y - NH_RADIUS) * dims.x - NH_RADIUS),
xMin = max(NH_RADIUS, gridLimX.start),
xMax = min(dims.x - NH_RADIUS, gridLimX.end);
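    // The clamps above intersect the rendered window (gridLimX/gridLimY) with the
    // simulation interior, so the loop only visits interior cells that fall inside
    // the visible region.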
for (uint idx = idxMin; idx < idxMax; idx += stride) {
// to check if out of bounds and to map vertices
const uint x = idx % dims.x, y = idx / dims.x;
// try avoiding further operations when not needed
// atomicMax is pretty expensive
if (xMin <= x && x < xMax && grid[idx]) {
            // calculate mapping between grid and vertex
const uint vx = (x - gridLimX.start) / cellDensity.x,
vy = (y - gridLimY.start) / cellDensity.y,
vidx = vy * numVerticesX + vx;
// no need to be atomic on a read
            // we check beforehand to avoid an atomic-write bottleneck
// gridVertices[vidx].state = 1;
// gridVertices[vidx].state =
// max(gridVertices[vidx].state, static_cast<int>(grid[idx]));
if (gridVertices[vidx].state == 0)
atomicMax(&gridVertices[vidx].state,
static_cast<int>(grid[idx]));
}
}
}
__global__ void
k_evolve_count_rule(const GridType *const grid, GridType *const nextGrid,
const uvec2 dims,
hiprandState_t *const __restrict__ globalRandState,
const float virtualSpawnProbability,
const bool countAliveCells, uint *const activeCellCount) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
// if col is 0 or dims.x-1, given MxN grid and NH_RADIUS=1
if (x < NH_RADIUS || dims.x - NH_RADIUS <= x)
continue;
// check cell state
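        // game_of_life() and count_nh() come from the included headers; with
        // NH_RADIUS == 1, count_nh presumably counts the 8-cell Moore neighborhood
        // that the rule is applied to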
nextGrid[idx] = game_of_life(grid[idx], count_nh(grid, dims.x, idx));
// add a "virtual particle" spawn probability
// note: this branching does not cause significant perf. hit
if (virtualSpawnProbability > 0 && nextGrid[idx] == 0)
nextGrid[idx] =
hiprand_uniform(&globalRandState[idx]) < virtualSpawnProbability;
// avoid atomicAdd when not necessary
if (countAliveCells)
atomicAdd(activeCellCount, static_cast<uint>(nextGrid[idx] > 0));
}
}
| 2fe24b80d98689d33ba0e9903df4dd4499f4c345.cu | #include "commons_gpu.cuh"
#include "definitions.hpp"
#include "kernels.cuh"
__global__ void k_setup_rng(const uvec2 dims,
curandState *const __restrict__ globalRandState,
const uint seed) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS,
xMax = dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
if (NH_RADIUS < x && x < xMax)
curand_init(seed, idx, 0, &globalRandState[idx]);
}
}
__global__ void k_init_grid(GridType *const grid, const uvec2 dims,
curandState *const __restrict__ globalRandState,
const float spawnProbability) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols OR max id given radius
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS,
xMax = dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
grid[idx] = (NH_RADIUS < x) * (x < xMax) *
(curand_uniform(&globalRandState[idx]) < spawnProbability);
}
}
__global__ void k_reset_grid_buffers(fvec2s *const __restrict__ gridVertices,
const uvec2 numVertices) {
const uint stride = gridDim.x * blockDim.x,
idxMax = numVertices.x * numVertices.y;
for (uint idx = blockDim.x * blockIdx.x + threadIdx.x; idx < idxMax;
idx += stride)
gridVertices[idx].state = 0;
}
__global__ void
k_update_grid_buffers(const GridType *const grid, const uvec2 dims,
fvec2s *const __restrict__ gridVertices,
const uint numVerticesX, const uvec2 cellDensity,
const ulim2 gridLimX, const ulim2 gridLimY) {
// idxMin = thread ID + render margin
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
max(gridLimY.start * dims.x + gridLimX.start,
(dims.x * NH_RADIUS) + NH_RADIUS),
idxMax = min((gridLimY.end - 1) * dims.x + gridLimX.end,
(dims.y - NH_RADIUS) * dims.x - NH_RADIUS),
xMin = max(NH_RADIUS, gridLimX.start),
xMax = min(dims.x - NH_RADIUS, gridLimX.end);
for (uint idx = idxMin; idx < idxMax; idx += stride) {
// to check if out of bounds and to map vertices
const uint x = idx % dims.x, y = idx / dims.x;
// try avoiding further operations when not needed
// atomicMax is pretty expensive
if (xMin <= x && x < xMax && grid[idx]) {
            // calculate mapping between grid and vertex
const uint vx = (x - gridLimX.start) / cellDensity.x,
vy = (y - gridLimY.start) / cellDensity.y,
vidx = vy * numVerticesX + vx;
// no need to be atomic on a read
// we check before to avoid atomic writing bottleneck
// gridVertices[vidx].state = 1;
// gridVertices[vidx].state =
// max(gridVertices[vidx].state, static_cast<int>(grid[idx]));
if (gridVertices[vidx].state == 0)
atomicMax(&gridVertices[vidx].state,
static_cast<int>(grid[idx]));
}
}
}
__global__ void
k_evolve_count_rule(const GridType *const grid, GridType *const nextGrid,
const uvec2 dims,
curandState *const __restrict__ globalRandState,
const float virtualSpawnProbability,
const bool countAliveCells, uint *const activeCellCount) {
// idxMin = thread ID + skip safety rows + skip safety cols
// idxMax = y - 1 full rows + last row cols
const uint stride = gridDim.x * blockDim.x,
idxMin = blockDim.x * blockIdx.x + threadIdx.x +
(dims.x * NH_RADIUS) + NH_RADIUS,
idxMax = (dims.y - NH_RADIUS) * dims.x - NH_RADIUS;
for (uint idx = idxMin; idx < idxMax; idx += stride) {
const uint x = idx % dims.x;
// if col is 0 or dims.x-1, given MxN grid and NH_RADIUS=1
if (x < NH_RADIUS || dims.x - NH_RADIUS <= x)
continue;
// check cell state
nextGrid[idx] = game_of_life(grid[idx], count_nh(grid, dims.x, idx));
// add a "virtual particle" spawn probability
// note: this branching does not cause significant perf. hit
if (virtualSpawnProbability > 0 && nextGrid[idx] == 0)
nextGrid[idx] =
curand_uniform(&globalRandState[idx]) < virtualSpawnProbability;
// avoid atomicAdd when not necessary
if (countAliveCells)
atomicAdd(activeCellCount, static_cast<uint>(nextGrid[idx] > 0));
}
}
|
5c369ad7a91dc92861adbf7ad454d41e2314b87d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
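    // The state variables are stored pitched: one row per equation (NEQ rows) and
    // one column per cell, which is why the kernels below index with
    // (char*)sv + pitch * i + threadID.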
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    //the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
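            // (ordered as in the commented-out default block above: V, M, H, J, Xr1,
            // Xr2, Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki)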
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
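        // Each step evaluates the RHS for this cell's region (myocardium or
        // epicardium, selected by the mapping flag) and writes the updated state
        // back into the pitched sv array.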
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated version from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
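    //The gate updates below use the exponential (Rush-Larsen style) form
    //x_inf - (x_inf - x)*exp(-dt/tau), so rDY_ holds the new gate values rather
    //than time derivatives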
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 5c369ad7a91dc92861adbf7ad454d41e2314b87d.cu |
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
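// State variables are stored as NEQ pitched rows: variable i of cell 'threadID' lives at
// *((real *)((char *) sv + pitch * i) + threadID), which is how the kernels below index it.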
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
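// mapping[] selects the cell model per volume: 0 = myocardium RHS, any other value = epicardium RHS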
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
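// (the epicardium variant in RHS_gpu_epi uses Gks = 0.245 instead)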
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated version from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
d6fab437dead0373ffcfbd4adcf8708083623ed3.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/***
* File: maxwell_griffin_lab4p2.cu
* Desc: Performs 2 Sobel edge detection operations on a .bmp, once by a
* serial algorithm, and once by a massively parallel CUDA algorithm.
*/
extern "C"
{
}
#define PIXEL_BLACK (0)
#define PIXEL_WHITE (255)
#define PERCENT_BLACK_THRESHOLD (0.75)
#define CUDA_GRIDS (1)
#define CUDA_BLOCKS_PER_GRID (32)
#define CUDA_THREADS_PER_BLOCK (128)
#define MS_PER_SEC (1000)
#define NS_PER_MS (1000 * 1000)
#define NS_PER_SEC (NS_PER_MS * MS_PER_SEC)
#define LINEARIZE(row, col, dim) \
(((row) * (dim)) + (col))
static struct timespec rtcSerialStart;
static struct timespec rtcSerialEnd;
static struct timespec rtcParallelStart;
static struct timespec rtcParallelEnd;
__device__ int Sobel_Gx[3][3] = {
{ -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 }
};
__device__ int Sobel_Gy[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
/*
* Display all header information and matrix and CUDA parameters.
*
* @param inputFile -- name of the input image
* @param serialOutputFile -- name of the serial output image
* @param parallelOutputFile -- name of the parallel output image
* @param imageHeight -- in pixels
* @param imageWidth -- in pixels
*/
void DisplayParameters(
char *inputFile,
char *serialOutputFile,
char *cudaOutputFile,
int imageHeight,
int imageWidth)
{
printf("********************************************************************************\n");
printf("lab4p2: serial vs. CUDA Sobel edge detection.\n");
printf("\n");
printf("Input image: %s \t(Height: %d pixels, width: %d pixels)\n", inputFile, imageHeight, imageWidth);
printf("Serial output image: \t%s\n", serialOutputFile);
printf("CUDA output image: \t%s\n", cudaOutputFile);
printf("\n");
printf("CUDA compute structure:\n");
printf("|-- with %d grid\n", CUDA_GRIDS);
printf(" |-- with %d blocks\n", CUDA_BLOCKS_PER_GRID);
printf(" |-- with %d threads per block\n", CUDA_THREADS_PER_BLOCK);
printf("\n");
}
/*
* Display the timing and convergence results to the screen.
*
* @param serialConvergenceThreshold
 * @param parallelConvergenceThreshold
*/
void DisplayResults(
int serialConvergenceThreshold,
int parallelConvergenceThreshold)
{
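// LINEARIZE(sec, nsec, NS_PER_SEC) folds a struct timespec into total nanoseconds;
// the start/end difference is then divided by NS_PER_SEC to report seconds.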
printf("Time taken for serial Sobel edge detection: %lf\n",
(LINEARIZE(rtcSerialEnd.tv_sec, rtcSerialEnd.tv_nsec, NS_PER_SEC)
- LINEARIZE(rtcSerialStart.tv_sec, rtcSerialStart.tv_nsec, NS_PER_SEC))
/ ((double)NS_PER_SEC));
printf("Convergence Threshold: %d\n", serialConvergenceThreshold);
printf("\n");
printf("Time taken for CUDA Sobel edge detection: %lf\n",
(LINEARIZE(rtcParallelEnd.tv_sec, rtcParallelEnd.tv_nsec, NS_PER_SEC)
- LINEARIZE(rtcParallelStart.tv_sec, rtcParallelStart.tv_nsec, NS_PER_SEC))
/ ((double)NS_PER_SEC));
printf("Convergence Threshold: %d\n", parallelConvergenceThreshold);
printf("********************************************************************************\n");
}
/*
 * CUDA kernel that performs one Sobel edge detection pass over an input pixel
 * buffer at a given gradient threshold: pixels whose gradient magnitude exceeds
 * the threshold are written as white, all others as black.
 *
 * @param input -- input pixel buffer
 * @param output -- output pixel buffer
 * @param height -- height of pixel image
 * @param width -- width of pixel image
 * @param gradientThreshold -- gradient magnitude above which a pixel is marked as an edge (white)
 */
__global__ void CudaSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width, int gradientThreshold)
{
int row = 0;
for(int i = 0; row < (height - 1); i++)
{
// Let the blockIdx increment beyond its dimension for cyclic distribution of the test pixels
int blockRow = (i * gridDim.x) + blockIdx.x;
// Calculate the row/col in the image buffer that this thread's stencil's center is on
row = (LINEARIZE(blockRow, threadIdx.x, blockDim.x) / (width - 2)) + 1;
int col = (LINEARIZE(blockRow, threadIdx.x, blockDim.x) % (width - 2)) + 1;
// Calculate Sobel magnitude of gradient directly, instead of using Sobel_Magnitude utility
double Gx = (Sobel_Gx[0][0] * input[LINEARIZE(row - 1, col - 1, width)])
+ (Sobel_Gx[0][2] * input[LINEARIZE(row - 1, col + 1, width)])
+ (Sobel_Gx[1][0] * input[LINEARIZE(row, col - 1, width)])
+ (Sobel_Gx[1][2] * input[LINEARIZE(row, col + 1, width)])
+ (Sobel_Gx[2][0] * input[LINEARIZE(row + 1, col - 1, width)])
+ (Sobel_Gx[2][2] * input[LINEARIZE(row + 1, col + 1, width)]);
double Gy = (Sobel_Gy[0][0] * input[LINEARIZE(row - 1, col - 1, width)])
+ (Sobel_Gy[0][1] * input[LINEARIZE(row - 1, col, width)])
+ (Sobel_Gy[0][2] * input[LINEARIZE(row - 1, col + 1, width)])
+ (Sobel_Gy[2][0] * input[LINEARIZE(row + 1, col - 1, width)])
+ (Sobel_Gy[2][1] * input[LINEARIZE(row + 1, col, width)])
+ (Sobel_Gy[2][2] * input[LINEARIZE(row + 1, col + 1, width)]);
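// Compare the squared gradient magnitude against the squared threshold to avoid a per-pixel sqrt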
if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold))
{
output[LINEARIZE(row, col, width)] = PIXEL_WHITE;
}
else
{
output[LINEARIZE(row, col, width)] = PIXEL_BLACK;
}
}
}
| d6fab437dead0373ffcfbd4adcf8708083623ed3.cu |
#include "includes.h"
/***
* File: maxwell_griffin_lab4p2.cu
* Desc: Performs 2 Sobel edge detection operations on a .bmp, once by a
* serial algorithm, and once by a massively parallel CUDA algorithm.
*/
extern "C"
{
}
#define PIXEL_BLACK (0)
#define PIXEL_WHITE (255)
#define PERCENT_BLACK_THRESHOLD (0.75)
#define CUDA_GRIDS (1)
#define CUDA_BLOCKS_PER_GRID (32)
#define CUDA_THREADS_PER_BLOCK (128)
#define MS_PER_SEC (1000)
#define NS_PER_MS (1000 * 1000)
#define NS_PER_SEC (NS_PER_MS * MS_PER_SEC)
#define LINEARIZE(row, col, dim) \
(((row) * (dim)) + (col))
static struct timespec rtcSerialStart;
static struct timespec rtcSerialEnd;
static struct timespec rtcParallelStart;
static struct timespec rtcParallelEnd;
__device__ int Sobel_Gx[3][3] = {
{ -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 }
};
__device__ int Sobel_Gy[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
/*
* Display all header information and matrix and CUDA parameters.
*
* @param inputFile -- name of the input image
* @param serialOutputFile -- name of the serial output image
* @param parallelOutputFile -- name of the parallel output image
* @param imageHeight -- in pixels
* @param imageWidth -- in pixels
*/
void DisplayParameters(
char *inputFile,
char *serialOutputFile,
char *cudaOutputFile,
int imageHeight,
int imageWidth)
{
printf("********************************************************************************\n");
printf("lab4p2: serial vs. CUDA Sobel edge detection.\n");
printf("\n");
printf("Input image: %s \t(Height: %d pixels, width: %d pixels)\n", inputFile, imageHeight, imageWidth);
printf("Serial output image: \t%s\n", serialOutputFile);
printf("CUDA output image: \t%s\n", cudaOutputFile);
printf("\n");
printf("CUDA compute structure:\n");
printf("|-- with %d grid\n", CUDA_GRIDS);
printf(" |-- with %d blocks\n", CUDA_BLOCKS_PER_GRID);
printf(" |-- with %d threads per block\n", CUDA_THREADS_PER_BLOCK);
printf("\n");
}
/*
* Display the timing and convergence results to the screen.
*
* @param serialConvergenceThreshold
 * @param parallelConvergenceThreshold
*/
void DisplayResults(
int serialConvergenceThreshold,
int parallelConvergenceThreshold)
{
printf("Time taken for serial Sobel edge detection: %lf\n",
(LINEARIZE(rtcSerialEnd.tv_sec, rtcSerialEnd.tv_nsec, NS_PER_SEC)
- LINEARIZE(rtcSerialStart.tv_sec, rtcSerialStart.tv_nsec, NS_PER_SEC))
/ ((double)NS_PER_SEC));
printf("Convergence Threshold: %d\n", serialConvergenceThreshold);
printf("\n");
printf("Time taken for CUDA Sobel edge detection: %lf\n",
(LINEARIZE(rtcParallelEnd.tv_sec, rtcParallelEnd.tv_nsec, NS_PER_SEC)
- LINEARIZE(rtcParallelStart.tv_sec, rtcParallelStart.tv_nsec, NS_PER_SEC))
/ ((double)NS_PER_SEC));
printf("Convergence Threshold: %d\n", parallelConvergenceThreshold);
printf("********************************************************************************\n");
}
/*
 * CUDA kernel that performs one Sobel edge detection pass over an input pixel
 * buffer at a given gradient threshold: pixels whose gradient magnitude exceeds
 * the threshold are written as white, all others as black.
 *
 * @param input -- input pixel buffer
 * @param output -- output pixel buffer
 * @param height -- height of pixel image
 * @param width -- width of pixel image
 * @param gradientThreshold -- gradient magnitude above which a pixel is marked as an edge (white)
 */
__global__ void CudaSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width, int gradientThreshold)
{
int row = 0;
for(int i = 0; row < (height - 1); i++)
{
// Let the blockIdx increment beyond its dimension for cyclic distribution of the test pixels
int blockRow = (i * gridDim.x) + blockIdx.x;
// Calculate the row/col in the image buffer that this thread's stencil's center is on
row = (LINEARIZE(blockRow, threadIdx.x, blockDim.x) / (width - 2)) + 1;
int col = (LINEARIZE(blockRow, threadIdx.x, blockDim.x) % (width - 2)) + 1;
// Calculate Sobel magnitude of gradient directly, instead of using Sobel_Magnitude utility
double Gx = (Sobel_Gx[0][0] * input[LINEARIZE(row - 1, col - 1, width)])
+ (Sobel_Gx[0][2] * input[LINEARIZE(row - 1, col + 1, width)])
+ (Sobel_Gx[1][0] * input[LINEARIZE(row, col - 1, width)])
+ (Sobel_Gx[1][2] * input[LINEARIZE(row, col + 1, width)])
+ (Sobel_Gx[2][0] * input[LINEARIZE(row + 1, col - 1, width)])
+ (Sobel_Gx[2][2] * input[LINEARIZE(row + 1, col + 1, width)]);
double Gy = (Sobel_Gy[0][0] * input[LINEARIZE(row - 1, col - 1, width)])
+ (Sobel_Gy[0][1] * input[LINEARIZE(row - 1, col, width)])
+ (Sobel_Gy[0][2] * input[LINEARIZE(row - 1, col + 1, width)])
+ (Sobel_Gy[2][0] * input[LINEARIZE(row + 1, col - 1, width)])
+ (Sobel_Gy[2][1] * input[LINEARIZE(row + 1, col, width)])
+ (Sobel_Gy[2][2] * input[LINEARIZE(row + 1, col + 1, width)]);
if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold))
{
output[LINEARIZE(row, col, width)] = PIXEL_WHITE;
}
else
{
output[LINEARIZE(row, col, width)] = PIXEL_BLACK;
}
}
}
|
3fce314da538e4e84ee1004fda2255dd16e19d94.hip | // !!! This is a file automatically generated by hipify!!!
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL( call) do { \
cuda_calls++; \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, hipGetErrorString( err) ); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} } while (0)
# define CU_SAFE_CALL_NO_SYNC( call ) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__ ); \
exit(EXIT_FAILURE); \
} } while (0)
# define CUT_DEVICE_INIT_DRV(cuDevice) do { \
cuDevice = 0; \
int deviceCount = 0; \
hipError_t err = hipInit(0); \
if (hipSuccess == err) \
CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev));\
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) do { \
CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while(0)
#define BIND_TEX_ARRAY(tex, arr, desc) do { \
CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC(ptr, size) do { \
hipMalloc(ptr, size); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \
hipMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \
hipMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while(0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C"
int lookupNumLeaves(ReferencePage * page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C"
void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
char * createTimer()
{
unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char *) ptr;
}
void startTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL);
}
void stopTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL);
}
float getTimerValue(char * ptr)
{
Timer_t * timer = (Timer_t*) ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char * ptr)
{
free((Timer_t *)ptr);
}
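/* Typical usage of these wall-clock timers, as done throughout this file
 * (the accumulator name below is illustrative only):
 *
 *   char * timer = createTimer();
 *   startTimer(timer);
 *   ... timed work ...
 *   stopTimer(timer);
 *   some_accumulator += getTimerValue(timer); // elapsed time in milliseconds
 *   deleteTimer(timer);
 */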
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1)
{
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx) {
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
(width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char * reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char *) malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
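// Reordering: each group of 'blocksize' consecutive reference characters is stacked
// vertically into one column (same x, adjacent y), presumably to improve 2D texture-cache
// locality when the matching kernel reads short substrings of the reference.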
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y*refpitch+x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
void boardMemory(size_t * free_mem, size_t * total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512*1024*1024;
*total_mem = 768*1024*1024;
#else
CU_SAFE_CALL_NO_SYNC(hipMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
hipChannelFormatDesc refTextureDesc =
hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows*ref->pitch,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.addressMode[1] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice) );
reftex.addressMode[0] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc refDesc =
hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch ,
ref->pitch * sizeof(char),
numrows,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice) );
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
#if REFTEX
CUDA_SAFE_CALL(hipUnbindTexture( reftex ) );
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(hipFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) );
#endif
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) );
#endif
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(hipFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
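// Uploads, in order: the (possibly reordered) reference string, the tree node texture,
// the children texture (if present), the optional access histograms, and, when the
// two-level tree caches are enabled, the top of the tree into constant memory.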
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
hipChannelFormatDesc nodeTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height );
CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.addressMode[1] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height );
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
hipMemcpyHostToDevice));
#endif
if (ref->tex_children_height)
{
#if CHILDTEX
hipChannelFormatDesc childrenTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height );
CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.addressMode[1] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height );
CUDA_SAFE_CALL( hipMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
hipMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL( hipMemcpy( (ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
#if NODETEX
hipChannelFormatDesc nodeTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height* sizeof(PixelOfNode));
#endif
if (ref->tex_children_height)
{
// Child tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL( hipMemcpy( (ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
#if CHILDTEX
hipChannelFormatDesc childTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2];
#else
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_children))[loc];
#elif MERGETEX
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1];
#else
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count-1]);
}
void loadQueries(MatchContext* ctx)
{
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
hipMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = hipAddressModeClamp;
qrytex.filterMode = hipFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc qryDesc =
hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
CUDA_MALLOC((void**) &queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(hipFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE)
{
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
int num_coords = lengths[i + j] - match_length + 1;
if ( max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
ctx->results.h_coord_tex_array[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i)
{
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0)
{
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
void loadResultBuffer(MatchContext* ctx)
{
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords*sizeof(MatchCoord) );
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr,"board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL)
{
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
if (!ctx->on_cpu)
{
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
hipMemcpyDeviceToHost) );
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost) );
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i)
{
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i)
{
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
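// In the default layout a query's coords are contiguous, so position qrychar
// of query qryid is simply base + qrychar. Under COALESCED_QUERIES the coords
// of the WARP_SIZE queries in a warp are interleaved (see buildCoordOffsetArray
// above), so consecutive positions of one query are WARP_SIZE entries apart.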
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0)
{
for (int j = 0; j < numCoords; ++j)
{
MatchCoord * coord = ctx->results.h_match_coords+j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j)
{
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION)
{
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries)
{
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen)
{
if (midx >= numMatches)
{
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment));
//hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment));
}
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**) &d_matches, matchesSize);
struct Alignment * d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**) &d_alignments, alignmentSize);
CUDA_SAFE_CALL(hipMemset((void*) d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG)
{
for (int i = 0; i < numMatches; i++)
{
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
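// Example with hypothetical numbers: for BLOCKSIZE == 256 and numMatches ==
// 1000, blocksize is 256 and dimGrid.x is ceil(1000/256.0) = 4, so the last
// block has idle threads; the kernel is expected to bounds-check on numMatches.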
fprintf(stderr, " Calling print kernel... ");
allocateReadWriteSets( dimGrid, dimBlock, 0 );
hipLaunchKernelGGL(( printKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
freeReadWriteSets( dimGrid, dimBlock, 0 );
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if ( hipSuccess != err)
{
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(hipMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
hipMemcpyDeviceToHost));
hipDeviceSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(hipFree(d_alignments));
CUDA_SAFE_CALL(hipFree(d_matches));
}
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i)
{
MatchInfo& match = h_matches[i];
if (match.queryid != qry)
{
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK))
{
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu)
{
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu)
{
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else
{
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
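// Process the match coordinates in rounds: each pass packs as many matches and
// alignments as fit in the available board memory (coordsToPrintBuffers), runs
// the print kernel or the CPU path, streams the formatted output, and frees the
// host buffers before the next round.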
while (next_coord < last_coord)
{
// see how many queries will fit on the board
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100)
{
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu)
{
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else
{
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++)
{
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++)
{
// See if there are any more left maximal alignments for this match
if (h_alignments[base+i].left_in_ref == 0)
{
break;
}
if (h_matches[m].queryid != lastqry)
{
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base+i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base+i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//hipHostFree((void*)h_alignments);
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size)
{
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size)
{
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
if (stats_filename)
{
FILE* f = fopen(stats_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else
{
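// CSV header: the single-letter columns record the compile-time configuration,
// in the same order as the values printed below -- Q=QRYTEX, R=REFTEX,
// T=TREETEX, m=MERGETEX, r=REORDER_REF, t=REORDER_TREE, n=RENUMBER_TREE.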
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f,"\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename)
{
FILE* f = fopen(node_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
fclose(f);
}
}
if (child_hist_filename)
{
FILE* f = fopen(child_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
fclose(f);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
{
total_node_hits += stats->node_hist[i];
if (i < 256) { tree_top_node_hits += stats->node_hist[i]; }
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
{
total_child_hits += stats->child_hist[i];
if (i < 256) { tree_top_child_hits += stats->child_hist[i]; }
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, 100.0f * tree_top_node_hits / total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, 100.0f * tree_top_child_hits / total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
//TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
allocateReadWriteSets( dimGrid, dimBlock, 0 );
hipLaunchKernelGGL(( mummergpuRCKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
freeReadWriteSets( dimGrid, dimBlock, 0 );
}
else {
allocateReadWriteSets( dimGrid, dimBlock, 0 );
hipLaunchKernelGGL(( mummergpuKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
(_PixelOfChildren*)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
(char*)ctx->ref->d_ref_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
freeReadWriteSets( dimGrid, dimBlock, 0 );
}
// check if kernel execution generated an error
hipError_t err = hipGetLastError();
if ( hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu)
{
matchOnCPU(ctx, reverse_complement);
}
else
{
matchOnGPU(ctx, reverse_complement);
hipDeviceSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice with the
// reverse/fwdreverse idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu)
{
getExactAlignments(ctx, page, false);
}
else
{
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int * p = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(hipFree(p));
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
}
return free_mem;
}
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length =
ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100)
{
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
lseek(ctx->queries->qfile, 0, SEEK_SET);
return 0;
}
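// Split the reference into fixed-size pages that are matched one at a time, so
// a suffix tree for the entire reference never has to fit on the board at once.
// Adjacent pages overlap by MAX_QUERY_LEN + 1 bases so matches spanning a page
// boundary are still found; shadow_left/shadow_right record the overlapped
// coordinates, presumably so the printing code can suppress duplicates.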
void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) {
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ?
BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
int streamReferenceAgainstQueries(MatchContext* ctx) {
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
| 3fce314da538e4e84ee1004fda2255dd16e19d94.cu | #include<curd_lib_host.h>
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <cuda.h>
#include <vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
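// BREATHING_ROOM is the slack (16 MB) subtracted from the reported free board
// memory before sizing buffers, BASES_PER_TREE_PAGE caps how much of the
// reference goes into a single suffix-tree page, and BLOCKSIZE is the thread
// block size used for the match and print kernel launches below.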
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL( call) do { \
cuda_calls++; \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, cudaGetErrorString( err) ); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} } while (0)
# define CU_SAFE_CALL_NO_SYNC( call ) do { \
CUresult err = call; \
if( CUDA_SUCCESS != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__ ); \
exit(EXIT_FAILURE); \
} } while (0)
# define CUT_DEVICE_INIT_DRV(cuDevice) do { \
cuDevice = 0; \
int deviceCount = 0; \
CUresult err = cuInit(0); \
if (CUDA_SUCCESS == err) \
CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev));\
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) do { \
CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while(0)
#define BIND_TEX_ARRAY(tex, arr, desc) do { \
CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC(ptr, size) do { \
cudaMalloc(ptr, size); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \
cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while(0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \
cudaMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while(0)
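// The CUDA_MALLOC* and BIND_TEX* wrappers mirror the plain CUDA calls but also
// bump num_bind_tex_calls; matchQueriesToReferencePage and getExactAlignments
// watch that counter and reset the device after ~100 such calls, presumably to
// work around resource exhaustion from repeated allocations and texture binds.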
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C"
int lookupNumLeaves(ReferencePage * page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C"
void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAlignments);
char * createTimer()
{
unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char *) ptr;
}
void startTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL);
}
void stopTimer(char * ptr)
{
gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL);
}
float getTimerValue(char * ptr)
{
Timer_t * timer = (Timer_t*) ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char * ptr)
{
free((Timer_t *)ptr);
}
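// Typical usage of the timer helpers, as seen throughout this file:
//
// char* t = createTimer();
// startTimer(t);
// /* ... work to be measured ... */
// stopTimer(t);
// stats.t_some_phase += getTimerValue(t); // elapsed ms (placeholder field name)
// deleteTimer(t);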
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1)
{
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx) {
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char * loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
(width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char * reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char *) malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
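// This loop groups each run of blocksize consecutive reference characters into
// one column of the 2D texture, presumably to improve 2D cache locality.
// Example: with refpitch = 65536 and blocksize = 4, index i = 5 gives bigx = 5,
// bigy = 0, so the character lands at x = 5/4 = 1, y = 0*4 + 5%4 = 1.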
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y*refpitch+x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
void boardMemory(size_t * free_mem, size_t * total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512*1024*1024;
*total_mem = 768*1024*1024;
#else
CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
cudaChannelFormatDesc refTextureDesc =
cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows*ref->pitch,
cudaMemcpyHostToDevice));
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.addressMode[1] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice) );
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false; // access with unnormalized (integer) texture coordinates
cudaChannelFormatDesc refDesc =
cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch ,
ref->pitch * sizeof(char),
numrows,
cudaMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice) );
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
#if REFTEX
CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) );
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(cudaFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) );
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array)
{
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) );
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(cudaFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char * toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height );
CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.addressMode[1] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with unnormalized (integer) texture coordinates
BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height );
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
cudaMemcpyHostToDevice));
#endif
if (ref->tex_children_height)
{
#if CHILDTEX
cudaChannelFormatDesc childrenTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height );
CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.addressMode[1] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with unnormalized (integer) texture coordinates
BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height );
CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
cudaMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL( cudaMemcpy( (ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with unnormalized (integer) texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height* sizeof(PixelOfNode));
#endif
if (ref->tex_children_height)
{
// Child tex, 1-dimensional
CUDA_MALLOC( (void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL( cudaMemcpy( (ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
#if CHILDTEX
cudaChannelFormatDesc childTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with unnormalized (integer) texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_node_hist),
ref->tex_node_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height)
{
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC( (void**)(&ref->d_child_hist),
ref->tex_children_height *sizeof(int));
CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2];
#else
node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i)
{
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION;
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[loc];
#elif MERGETEX
child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1];
#else
child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL( cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count-1]);
}
void loadQueries(MatchContext* ctx)
{
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
cudaMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = cudaAddressModeClamp;
qrytex.filterMode = cudaFilterModePoint;
qrytex.normalized = false; // access with unnormalized (integer) texture coordinates
cudaChannelFormatDesc qryDesc =
cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_MALLOC((void**) &queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE)
{
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
int num_coords = lengths[i + j] - match_length + 1;
if ( max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j)
{
ctx->results.h_coord_tex_array[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i)
{
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0)
{
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
void loadResultBuffer(MatchContext* ctx)
{
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords*sizeof(MatchCoord) );
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr,"board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL)
{
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
CUDA_SAFE_CALL(cudaFree(ctx->results.d_coord_tex_array));
ctx->results.d_coord_tex_array = NULL;
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
if (!ctx->on_cpu)
{
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
cudaMemcpyDeviceToHost) );
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost) );
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height)
{
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i)
{
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i)
{
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
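// Layout sketch (illustrative): with COALESCED_QUERIES the coords of the
// queries sharing a warp are interleaved -- each query starts at
// h_coord_tex_array[qryid] and successive query characters sit WARP_SIZE
// entries apart -- matching the block_size = max_num_coords * WARP_SIZE
// reservation made in buildCoordOffsetArray. Without coalescing each
// query's coords are simply contiguous.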
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0)
{
for (int j = 0; j < numCoords; ++j)
{
MatchCoord * coord = ctx->results.h_match_coords+j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j)
{
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION)
{
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries)
{
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen)
{
if (midx >= numMatches)
{
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK))
{
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment));
//cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment));
}
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**) &d_matches, matchesSize);
struct Alignment * d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**) &d_alignments, alignmentSize);
CUDA_SAFE_CALL(cudaMemset((void*) d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG)
{
for (int i = 0; i < numMatches; i++)
{
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
fprintf(stderr, " Calling print kernel... ");
allocateReadWriteSets( dimGrid, dimBlock, 0 );
printKernel <<< dimGrid, dimBlock, 0 >>> (d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
freeReadWriteSets( dimGrid, dimBlock, 0 );
cudaThreadSynchronize();
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err)
{
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(cudaMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(cudaFree(d_alignments));
CUDA_SAFE_CALL(cudaFree(d_matches));
}
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i)
{
MatchInfo& match = h_matches[i];
if (match.queryid != qry)
{
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK))
{
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu)
{
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu)
{
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else
{
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord)
{
// see how many queries will fit on the board
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100)
{
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu)
{
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else
{
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++)
{
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++)
{
// See if there are any more left maximal alignments for this match
if (h_alignments[base+i].left_in_ref == 0)
{
break;
}
if (h_matches[m].queryid != lastqry)
{
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base+i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base+i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//cudaFreeHost((void*)h_alignments);
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size)
{
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size)
{
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
if (stats_filename)
{
FILE* f = fopen(stats_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else
{
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f,"\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename)
{
FILE* f = fopen(node_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
fclose(f);
}
}
if (child_hist_filename)
{
FILE* f = fopen(child_hist_filename, "w");
if (!f)
{
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else
{
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
fclose(f);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
{
total_node_hits += stats->node_hist[i];
if (i < 256) { tree_top_node_hits += stats->node_hist[i]; }
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
{
total_child_hits += stats->child_hist[i];
if (i < 256) { tree_top_child_hits += stats->child_hist[i]; }
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
//TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
allocateReadWriteSets( dimGrid, dimBlock, 0 );
mummergpuRCKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
freeReadWriteSets( dimGrid, dimBlock, 0 );
}
else {
allocateReadWriteSets( dimGrid, dimBlock, 0 );
mummergpuKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
(_PixelOfChildren*)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
(char*)ctx->ref->d_ref_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
, ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
freeReadWriteSets( dimGrid, dimBlock, 0 );
}
// check if kernel execution generated an error
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu)
{
matchOnCPU(ctx, reverse_complement);
}
else
{
matchOnGPU(ctx, reverse_complement);
cudaThreadSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice w/ the reverse/fwdreverse
// idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu)
{
getExactAlignments(ctx, page, false);
}
else
{
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int * p = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(cudaFree(p));
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
}
return free_mem;
}
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length =
ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100)
{
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
lseek(ctx->queries->qfile, 0, SEEK_SET);
return 0;
}
void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) {
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ?
BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
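// Example (illustrative numbers): with BASES_PER_TREE_PAGE = 8 Mbases, a
// 20 Mbase reference is split into ceil(20/8) = 3 pages. Each page begins
// MAX_QUERY_LEN + 1 bases before the previous page ends, so a match that
// straddles a page boundary is still found in full on one of the two pages.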
int streamReferenceAgainstQueries(MatchContext* ctx) {
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
|
cbe2e0b125693b3cba5b5956bfeba5f90e3134d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <vector>
#include "NvInferPlugin.h"
#include "basicOps.cuh"
dim3 cudaGridSize(uint n)
{
uint k = (n - 1) /BLOCK + 1;
uint x = k ;
uint y = 1 ;
if (x > 65535 )
{
x = ceil(sqrt(x));
y = (n - 1 )/(x*BLOCK) + 1;
}
dim3 d = {x,y,1} ;
return d;
}
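// Usage sketch (illustrative, not part of the original file): BLOCK comes
// from basicOps.cuh; the kernel and buffer names below are hypothetical.
//
// const uint n = 1u << 22; // element count
// dim3 grid = cudaGridSize(n); // grid.x <= 65535 and grid.x*grid.y*BLOCK >= n
// dim3 block(BLOCK);
// // someElementwiseKernel<<<grid, block>>>(d_buffer, n);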
std::ostream &operator<<(std::ostream &output, const half &value) {
output << static_cast<float>(value);
return output;
}
namespace onnx2trt {
// a convenient interface to print device memory
template<typename T>
int devicePrint(const T *deviceValues, int length, const std::string &info, int step) {
T *values = (T *)malloc(sizeof(T) * length);
hipMemcpy(values, deviceValues, sizeof(T) * length, hipMemcpyDeviceToHost);
std::cout << info << ": ";
for (int i = 0; i < length; i++) {
if (step != 1) {
if (!(i % step)) {
std::cout << std::endl;
}
}
std::cout << values[i] << " ";
}
std::cout << std::endl;
free(values);
return 0;
}
template int devicePrint<float>(const float *, int, const std::string &, int);
template int devicePrint<half>(const half *, int, const std::string &, int);
template int devicePrint<int>(const int *, int, const std::string &, int);
template<typename T>
int hostPrint(const T *values, int length, const std::string &info, int step) {
std::cout << info << ": ";
for (int i = 0; i < length; i++) {
if (step != 1) {
if (!(i % step)) {
std::cout << std::endl;
}
}
std::cout << values[i] << " ";
}
std::cout << std::endl;
return 0;
}
template int hostPrint<float>(const float *, int, const std::string &, int);
template int hostPrint<half>(const half *, int, const std::string &, int);
template int hostPrint<int>(const int *, int, const std::string &, int);
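// Usage sketch (illustrative): dump the first 16 floats of a device buffer,
// starting a new line every 4 values; d_scores is a hypothetical pointer.
//
// onnx2trt::devicePrint<float>(d_scores, 16, "scores", 4);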
// namespace ops {
// // refer to detectron2.layers.ops.mask_head_slice
// // perform slice operation on prediction masks with given class indices
// // inputs:
// // logits, (num_det, class_num, mask_height, mask_width)
// // pred, (num_det,)
// // outputs:
// // output, (num_det, mask_height, mask_width)
// template<typename T, typename I>
// __global__ void sliceKernel(const int64_t nThreads, const T *logits, const I *pred, T *output,
// int numMasks, int numClasses, int MaskHeight, int MaskWidth) {
// CUDA_1D_KERNEL_LOOP(index, nThreads) {
// int pw = index % MaskWidth;
// int ph = (index / MaskWidth) % MaskHeight;
// int n = index / MaskWidth / MaskHeight;
// int classIndex = (int)pred[n];
// const T *offset = logits +
// (((int64_t)n * (int64_t)numClasses + classIndex) * (int64_t)MaskHeight + ph) * (int64_t)MaskWidth + pw;
// output[index] = *offset;
// }
// }
// hipError_t maskHeadSlice(hipStream_t stream, const void *logits, const void *pred, void *output,
// int numMasks, int numClasses, int MaskHeight, int MaskWidth, nvinfer1::DataType type) {
// auto outputSize = (int64_t)numMasks * (int64_t)MaskHeight * (int64_t)MaskWidth;
// #ifdef VERBOSE
// std::vector<std::string> types {
// "kFLOAT", "kHALF", "kINT8", "kINT32"
// };
// LOGGER << "type: " << types[int(type)] << std::endl;
// std::cout << "output: " << numMasks << ", 1, " << MaskHeight << ", " << MaskWidth << std::endl;
// std::cout << "blockNum: " << outputSize << ", " << onnx2trt::ATenCeilDiv(outputSize, 256L) << ", " << 4096L << std::endl;
// #endif
// dim3 grid(::min(onnx2trt::ATenCeilDiv(outputSize, (int64_t)256), (int64_t)4096));
// dim3 block(256);
// assert(type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF);
// if (type == nvinfer1::DataType::kFLOAT) {
// sliceKernel<<<grid, block, 0, stream>>>(
// outputSize, static_cast<const float *>(logits), static_cast<const int *>(pred),
// static_cast<float *>(output), numMasks, numClasses, MaskHeight, MaskWidth);
// } else {
// sliceKernel<<<grid, block, 0, stream>>>(
// outputSize, static_cast<const half *>(logits), static_cast<const int *>(pred),
// static_cast<half *>(output), numMasks, numClasses, MaskHeight, MaskWidth);
// }
// return hipGetLastError();
// }
// template<typename T>
// struct toInt {
// const T epsilon_;
// explicit toInt(T epsilon) : epsilon_(epsilon) {}
// __device__ int operator()(const T &input) const {
// return (int)(input + epsilon_);
// }
// };
// template<typename D, typename T>
// struct to {
// __device__ T operator()(const D &input) const {
// return static_cast<T>(input);
// }
// };
// hipError_t cudaCast(hipStream_t stream, const void *input, void *output,
// nvinfer1::Dims dims, nvinfer1::DataType type, nvinfer1::DataType castType) {
// #ifdef VERBOSE
// std::vector<std::string> types {
// "kFLOAT", "kHALF", "kINT8", "kINT32"
// };
// LOGGER << "type: " << types[int(type)] << ", castType: " << types[int(castType)]
// << ", dimension: " << dims.nbDims << ", shape (";
// #endif
// int64_t outputSize = 1;
// for (int i = 0; i < dims.nbDims; i++) {
// outputSize *= (int64_t)dims.d[i];
// #ifdef VERBOSE
// std::cout << dims.d[i] << ", ";
// #endif
// }
// #ifdef VERBOSE
// std::cout << ")" << std::endl;
// LOGGER << "blockNum: " << outputSize << " " << onnx2trt::ATenCeilDiv(outputSize, 256L) << " " << 4096L << std::endl;
// #endif
// if (type == castType) {
// return hipErrorInvalidValue;
// }
// assert(type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF);
// switch (castType) {
// case nvinfer1::DataType::kINT8:
// return hipErrorInvalidValue;
// case nvinfer1::DataType::kINT32:
// if (type == nvinfer1::DataType::kFLOAT) {
// thrust::device_ptr<const float> input_ptr = thrust::device_pointer_cast(static_cast<const float *>(input));
// thrust::device_ptr<int> output_ptr = thrust::device_pointer_cast(static_cast<int *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, toInt<float>(0.1));
// } else {
// thrust::device_ptr<const half> input_ptr = thrust::device_pointer_cast(static_cast<const half *>(input));
// thrust::device_ptr<int> output_ptr = thrust::device_pointer_cast(static_cast<int *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, toInt<half>(0.1));
// }
// break;
// case nvinfer1::DataType::kHALF:
// if (type == nvinfer1::DataType::kFLOAT) {
// thrust::device_ptr<const float> input_ptr = thrust::device_pointer_cast(static_cast<const float *>(input));
// thrust::device_ptr<half> output_ptr = thrust::device_pointer_cast(static_cast<half *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, to<float, half>());
// }
// case nvinfer1::DataType::kFLOAT:
// if (type == nvinfer1::DataType::kHALF) {
// thrust::device_ptr<const half> input_ptr = thrust::device_pointer_cast(static_cast<const half *>(input));
// thrust::device_ptr<float> output_ptr = thrust::device_pointer_cast(static_cast<float *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, to<half, float>());
// }
// }
// return hipGetLastError();
// }
// struct modulus {
// const int denominator;
// modulus(int _denominator) : denominator(_denominator) {}
// __device__ int operator()(const int &input) const {
// return input % denominator;
// }
// };
// hipError_t fmod(hipStream_t stream, const int *inputs, int *outputs, int length, int denominator) {
// thrust::device_ptr<const int> inputs_ptr = thrust::device_pointer_cast(inputs);
// thrust::device_ptr<int> outputs_ptr = thrust::device_pointer_cast(outputs);
// thrust::transform(inputs_ptr, inputs_ptr + length, outputs_ptr, modulus(denominator));
// return hipGetLastError();
// }
// }
}
| cbe2e0b125693b3cba5b5956bfeba5f90e3134d1.cu |
#include <cassert>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <vector>
#include "NvInferPlugin.h"
#include "basicOps.cuh"
dim3 cudaGridSize(uint n)
{
uint k = (n - 1) /BLOCK + 1;
uint x = k ;
uint y = 1 ;
if (x > 65535 )
{
x = ceil(sqrt(x));
y = (n - 1 )/(x*BLOCK) + 1;
}
dim3 d = {x,y,1} ;
return d;
}
std::ostream &operator<<(std::ostream &output, const half &value) {
output << static_cast<float>(value);
return output;
}
namespace onnx2trt {
// a convenient interface to print device memory
template<typename T>
int devicePrint(const T *deviceValues, int length, const std::string &info, int step) {
T *values = (T *)malloc(sizeof(T) * length);
cudaMemcpy(values, deviceValues, sizeof(T) * length, cudaMemcpyDeviceToHost);
std::cout << info << ": ";
for (int i = 0; i < length; i++) {
if (step != 1) {
if (!(i % step)) {
std::cout << std::endl;
}
}
std::cout << values[i] << " ";
}
std::cout << std::endl;
free(values);
return 0;
}
template int devicePrint<float>(const float *, int, const std::string &, int);
template int devicePrint<half>(const half *, int, const std::string &, int);
template int devicePrint<int>(const int *, int, const std::string &, int);
template<typename T>
int hostPrint(const T *values, int length, const std::string &info, int step) {
std::cout << info << ": ";
for (int i = 0; i < length; i++) {
if (step != 1) {
if (!(i % step)) {
std::cout << std::endl;
}
}
std::cout << values[i] << " ";
}
std::cout << std::endl;
return 0;
}
template int hostPrint<float>(const float *, int, const std::string &, int);
template int hostPrint<half>(const half *, int, const std::string &, int);
template int hostPrint<int>(const int *, int, const std::string &, int);
// namespace ops {
// // refer to detectron2.layers.ops.mask_head_slice
// // perform slice operation on prediction masks with given class indices
// // inputs:
// // logits, (num_det, class_num, mask_height, mask_width)
// // pred, (num_det,)
// // outputs:
// // output, (num_det, mask_height, mask_width)
// template<typename T, typename I>
// __global__ void sliceKernel(const int64_t nThreads, const T *logits, const I *pred, T *output,
// int numMasks, int numClasses, int MaskHeight, int MaskWidth) {
// CUDA_1D_KERNEL_LOOP(index, nThreads) {
// int pw = index % MaskWidth;
// int ph = (index / MaskWidth) % MaskHeight;
// int n = index / MaskWidth / MaskHeight;
// int classIndex = (int)pred[n];
// const T *offset = logits +
// (((int64_t)n * (int64_t)numClasses + classIndex) * (int64_t)MaskHeight + ph) * (int64_t)MaskWidth + pw;
// output[index] = *offset;
// }
// }
// cudaError_t maskHeadSlice(cudaStream_t stream, const void *logits, const void *pred, void *output,
// int numMasks, int numClasses, int MaskHeight, int MaskWidth, nvinfer1::DataType type) {
// auto outputSize = (int64_t)numMasks * (int64_t)MaskHeight * (int64_t)MaskWidth;
// #ifdef VERBOSE
// std::vector<std::string> types {
// "kFLOAT", "kHALF", "kINT8", "kINT32"
// };
// LOGGER << "type: " << types[int(type)] << std::endl;
// std::cout << "output: " << numMasks << ", 1, " << MaskHeight << ", " << MaskWidth << std::endl;
// std::cout << "blockNum: " << outputSize << ", " << onnx2trt::ATenCeilDiv(outputSize, 256L) << ", " << 4096L << std::endl;
// #endif
// dim3 grid(std::min(onnx2trt::ATenCeilDiv(outputSize, (int64_t)256), (int64_t)4096));
// dim3 block(256);
// assert(type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF);
// if (type == nvinfer1::DataType::kFLOAT) {
// sliceKernel<<<grid, block, 0, stream>>>(
// outputSize, static_cast<const float *>(logits), static_cast<const int *>(pred),
// static_cast<float *>(output), numMasks, numClasses, MaskHeight, MaskWidth);
// } else {
// sliceKernel<<<grid, block, 0, stream>>>(
// outputSize, static_cast<const half *>(logits), static_cast<const int *>(pred),
// static_cast<half *>(output), numMasks, numClasses, MaskHeight, MaskWidth);
// }
// return cudaGetLastError();
// }
// template<typename T>
// struct toInt {
// const T epsilon_;
// explicit toInt(T epsilon) : epsilon_(epsilon) {}
// __device__ int operator()(const T &input) const {
// return (int)(input + epsilon_);
// }
// };
// template<typename D, typename T>
// struct to {
// __device__ T operator()(const D &input) const {
// return static_cast<T>(input);
// }
// };
// cudaError_t cudaCast(cudaStream_t stream, const void *input, void *output,
// nvinfer1::Dims dims, nvinfer1::DataType type, nvinfer1::DataType castType) {
// #ifdef VERBOSE
// std::vector<std::string> types {
// "kFLOAT", "kHALF", "kINT8", "kINT32"
// };
// LOGGER << "type: " << types[int(type)] << ", castType: " << types[int(castType)]
// << ", dimension: " << dims.nbDims << ", shape (";
// #endif
// int64_t outputSize = 1;
// for (int i = 0; i < dims.nbDims; i++) {
// outputSize *= (int64_t)dims.d[i];
// #ifdef VERBOSE
// std::cout << dims.d[i] << ", ";
// #endif
// }
// #ifdef VERBOSE
// std::cout << ")" << std::endl;
// LOGGER << "blockNum: " << outputSize << " " << onnx2trt::ATenCeilDiv(outputSize, 256L) << " " << 4096L << std::endl;
// #endif
// if (type == castType) {
// return cudaErrorInvalidValue;
// }
// assert(type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF);
// switch (castType) {
// case nvinfer1::DataType::kINT8:
// return cudaErrorInvalidValue;
// case nvinfer1::DataType::kINT32:
// if (type == nvinfer1::DataType::kFLOAT) {
// thrust::device_ptr<const float> input_ptr = thrust::device_pointer_cast(static_cast<const float *>(input));
// thrust::device_ptr<int> output_ptr = thrust::device_pointer_cast(static_cast<int *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, toInt<float>(0.1));
// } else {
// thrust::device_ptr<const half> input_ptr = thrust::device_pointer_cast(static_cast<const half *>(input));
// thrust::device_ptr<int> output_ptr = thrust::device_pointer_cast(static_cast<int *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, toInt<half>(0.1));
// }
// break;
// case nvinfer1::DataType::kHALF:
// if (type == nvinfer1::DataType::kFLOAT) {
// thrust::device_ptr<const float> input_ptr = thrust::device_pointer_cast(static_cast<const float *>(input));
// thrust::device_ptr<half> output_ptr = thrust::device_pointer_cast(static_cast<half *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, to<float, half>());
// }
// case nvinfer1::DataType::kFLOAT:
// if (type == nvinfer1::DataType::kHALF) {
// thrust::device_ptr<const half> input_ptr = thrust::device_pointer_cast(static_cast<const half *>(input));
// thrust::device_ptr<float> output_ptr = thrust::device_pointer_cast(static_cast<float *>(output));
// thrust::transform(input_ptr, input_ptr + outputSize, output_ptr, to<half, float>());
// }
// }
// return cudaGetLastError();
// }
// struct modulus {
// const int denominator;
// modulus(int _denominator) : denominator(_denominator) {}
// __device__ int operator()(const int &input) const {
// return input % denominator;
// }
// };
// cudaError_t fmod(cudaStream_t stream, const int *inputs, int *outputs, int length, int denominator) {
// thrust::device_ptr<const int> inputs_ptr = thrust::device_pointer_cast(inputs);
// thrust::device_ptr<int> outputs_ptr = thrust::device_pointer_cast(outputs);
// thrust::transform(inputs_ptr, inputs_ptr + length, outputs_ptr, modulus(denominator));
// return cudaGetLastError();
// }
// }
}
|
584a643bebe35cba6408fcc637a61383f8c0e6a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <op_boilerplate.h>
#include <loops/reduce_bool.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
functions::reduce::ReduceBoolFunction<X,Z>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
functions::reduce::ReduceBoolFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = reinterpret_cast<Z*>(vsPartials);
auto extraParams = reinterpret_cast<X*>(vextraParams);
Nd4jLong floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
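// Worked example (illustrative): for numItems == 96 the bit-stripping loop
// leaves floorPow2 == 64, so threads 64..95 first fold their values into
// sPartials[0..31]; the strided loop then halves the active range
// 32 -> 16 -> 8 -> 4 -> 2 -> 1, leaving the block's reduction in sPartials[0].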
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *vreductionBuffer,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
//shared memory space for storing intermediate results
__shared__ Z* sPartials;
__shared__ int tadLength;
__shared__ int numTads;
__shared__ bool isPlainOutput;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = reinterpret_cast<Z*>(shmem);
tadLength = shape::length(tadOnlyShapeInfo); //tadLength(xShapeInfo, dimension, dimensionLength);
numTads = shape::length(xShapeInfo) / tadLength;
isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1;
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
z[isPlainOutput ? r : shape::getIndexOffset(r, zShapeInfo, numTads)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vreductionBuffer,
Nd4jLong *tadOnlyShapeInfo) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ Z* sPartials;
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = reinterpret_cast<Z*>(shmem);
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo, len)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionBuffer;
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
if (threadIdx.x == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceBoolFunction<X,Z>::intermediateXD(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "reduceBoolDim(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceBoolFunction<X,Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "reduceBoolScalar(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
_CUDA_H void ReduceBoolFunction<X,Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_BOOL_OPS));
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
_CUDA_H void ReduceBoolFunction<X,Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_BOOL_OPS));
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *) *sPartials;
for (int i = 0; i < sPartialsLength; i++)
sPartialsDeref[i] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceBoolFunction, , LIBND4J_TYPES, BOOL_TYPES);
}
}
| 584a643bebe35cba6408fcc637a61383f8c0e6a4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <op_boilerplate.h>
#include <loops/reduce_bool.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
functions::reduce::ReduceBoolFunction<X,Z>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
__global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo,
void *extraParams,
void *z, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
functions::reduce::ReduceBoolFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = reinterpret_cast<Z*>(vsPartials);
auto extraParams = reinterpret_cast<X*>(vextraParams);
Nd4jLong floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
void *vreductionBuffer,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
//shared memory space for storing intermediate results
__shared__ Z* sPartials;
__shared__ int tadLength;
__shared__ int numTads;
__shared__ bool isPlainOutput;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = reinterpret_cast<Z*>(shmem);
tadLength = shape::length(tadOnlyShapeInfo); //tadLength(xShapeInfo, dimension, dimensionLength);
numTads = shape::length(xShapeInfo) / tadLength;
isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1;
}
__syncthreads();
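// Each block walks the TADs r = blockIdx.x, blockIdx.x + gridDim.x, ...:
// threads stride over the elements of one TAD, accumulate per-thread partials
// in shared memory, tree-reduce them via aggregatePartials, and thread 0
// writes that TAD's reduced value into z.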
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
z[isPlainOutput ? r : shape::getIndexOffset(r, zShapeInfo, numTads)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
__device__ void ReduceBoolFunction<X,Z>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo,
void *vextraParams,
void *vz, Nd4jLong *zShapeInfo,
void *vreductionBuffer,
Nd4jLong *tadOnlyShapeInfo) {
auto x = reinterpret_cast<X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
//shared memory space for storing intermediate results
__shared__ Z* sPartials;
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong len;
if(threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = reinterpret_cast<Z*>(shmem);
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
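// Grid-stride accumulation over the whole input: the fast path below indexes
// with the constant element-wise stride when the shape provides one
// (xEws > 0); otherwise every offset is resolved through the full shape info.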
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo, len)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, len), extraParams);
__syncthreads();
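// Multi-block case: each block publishes its partial result to
// reductionBuffer[blockIdx.x]; the counter kept at tc[16384] (an unsigned int
// view of the reduction buffer) is bumped atomically so that exactly one
// block, the last to arrive, re-reduces the per-block partials, writes the
// final scalar to z[0] and resets the counter for the next launch.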
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionBuffer;
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
else {
if (threadIdx.x == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceBoolFunction<X,Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "reduceBoolDim(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template<typename OpType>
__host__ void ReduceBoolFunction<X,Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "reduceBoolScalar(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
_CUDA_H void ReduceBoolFunction<X,Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_BOOL_OPS));
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
_CUDA_H void ReduceBoolFunction<X,Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_BOOL_OPS));
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
__device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *) *sPartials;
for (int i = 0; i < sPartialsLength; i++)
sPartialsDeref[i] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceBoolFunction, , LIBND4J_TYPES, BOOL_TYPES);
}
}
|
ea5ab5b28cc8f203452849e867c91a9524f3b7a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "GPUmat.hh"
// static parameters
static int init = 0;
static GPUmat *gm;
#include "cudaCommon.h"
//double **getGPUSourcePointers(const mxArray *prhs[], int num, int *retNumel);
//double **makeGPUDestinationArrays(GPUtype src, mxArray *retArray[], int howmany);
//double *makeDestinationArray(GPUtype src, mxArray *retArray[]);
#define OP_SOUNDSPEED 1
#define OP_GASPRESSURE 2
#define OP_TOTALPRESSURE 3
#define OP_MAGPRESSURE 4
#define OP_TOTALANDSND 5
#define OP_WARRAYS 6
#define OP_RELAXINGFLUX 7
#define OP_SEPERATELRFLUX 8
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n);
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n);
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n);
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n);
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n);
#define BLOCKWIDTH 256
#define THREADLOOPS 1
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
if (init == 0) {
// Initialize function
// mexLock();
// load GPUmat
gm = gmGetGPUmat();
init = 1;
}
// Determine appropriate number of arguments for RHS
if (nrhs < 2) mexErrMsgTxt("Require at least (computation type, input argument)");
int operation = (int)*mxGetPr(prhs[0]);
dim3 blocksize; blocksize.x = BLOCKWIDTH; blocksize.y = blocksize.z = 1;
int numel; dim3 gridsize;
// Select the appropriate kernel to invoke
if((operation == OP_SOUNDSPEED) || (operation == OP_GASPRESSURE) || (operation == OP_TOTALPRESSURE)) {
if( (nlhs != 1) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is Cs = cudaMHDKernels(1, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
int arrdim[3];
double **srcs = getGPUSourcePointers(prhs, arrdim, 1, 8);
numel = arrdim[0]*arrdim[1]*arrdim[2];
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
//printf("%i %i %i %i %i %i\n", blocksize.x, blocksize.y, blocksize.z, gridsize.x, gridsize.y, gridsize.z);
switch(operation) {
case OP_SOUNDSPEED: hipLaunchKernelGGL(( cukern_Soundspeed), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
case OP_GASPRESSURE: hipLaunchKernelGGL(( cukern_GasPressure), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
case OP_TOTALPRESSURE: hipLaunchKernelGGL(( cukern_TotalPressure), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
}
free(destPtr);
} else if((operation == OP_MAGPRESSURE)) {
if( (nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Magnetic pressure operator is Pm = cudaMHDKernels(4, bx, by, bz)"); }
double **srcs = getGPUSourcePointers(prhs, 3, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
hipLaunchKernelGGL(( cukern_MagneticPressure), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], destPtr[0], numel);
free(destPtr); free(srcs);
} else if((operation == OP_TOTALANDSND)) {
if( (nlhs != 2) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is [Ptot Cs] = cudaMHDKernels(5, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
double **srcs = getGPUSourcePointers(prhs, 8, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 2, gm);
hipLaunchKernelGGL(( cukern_TotalAndSound), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], destPtr[1], gam, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_WARRAYS)) {
if( (nlhs != 5) || (nrhs != 12)) { mexErrMsgTxt("solving W operator is [rhoW enerW pxW pyW pzW] = cudaMHDKernels(6, rho, E, px, py, pz, bx, by, bz, P, cFreeze, direction)"); }
int dir = (int)*mxGetPr(prhs[11]);
double **srcs = getGPUSourcePointers(prhs, 10, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 5, gm);
hipLaunchKernelGGL(( cukern_CalcWArrays), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], srcs[8], srcs[9], destPtr[0], destPtr[1], destPtr[2], destPtr[3], destPtr[4], dir, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_RELAXINGFLUX)) {
if( (nlhs != 1) || (nrhs != 8)) { mexErrMsgTxt("relaxing flux operator is fluxed = cudaMHDKernels(7, old, tempfreeze, right, right_shifted, left, left_shifted, lambda)"); }
double lambda = *mxGetPr(prhs[7]);
double **srcs = getGPUSourcePointers(prhs, 6, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
hipLaunchKernelGGL(( cukern_PerformFlux), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], destPtr[0], lambda, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_SEPERATELRFLUX)) {
if ((nlhs != 2) || (nrhs != 3)) { mexErrMsgTxt("flux seperation operator is [Fl Fr] = cudaMHDKernels(8, array, wArray)"); }
double **srcs = getGPUSourcePointers(prhs, 2, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 2, gm);
hipLaunchKernelGGL(( cukern_SeperateLRFlux), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], destPtr[0], destPtr[1], numel);
free(destPtr); free(srcs);
}
}
//#define KERNEL_PREAMBLE int x = THREADLOOPS*(threadIdx.x + blockDim.x*blockIdx.x); if (x >= n) {return;} int imax; ((x+THREADLOOPS) > n) ? imax = n : imax = x + THREADLOOPS; for(; x < imax; x++)
#define KERNEL_PREAMBLE int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= n) { return; }
// THIS KERNEL CALCULATES SOUNDSPEED
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double gg1 = gam*(gam-1.0);
KERNEL_PREAMBLE
dout[x] = sqrt(abs( (gg1*(E[x] - .5*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x])/rho[x]) + (2.0 -.5*gg1)*(bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x]))/rho[x] ));
}
// THIS KERNEL CALCULATES GAS PRESSURE
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
KERNEL_PREAMBLE
dout[x] = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x] + bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]));
}
// THIS KERNEL CALCULATES TOTAL PRESSURE
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
KERNEL_PREAMBLE
dout[x] = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x])) + .5*(2.0-gam)*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
// THIS KERNEL CALCULATES MAGNETIC PRESSURE
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n)
{
KERNEL_PREAMBLE
dout[x] = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n)
{
double gg1 = gam*(gam-1.0);
double psqhf, bsqhf;
KERNEL_PREAMBLE {
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
bsqhf = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
total[x] = (gam-1.0)*(E[x] - psqhf/rho[x]) + (2.0-gam)*bsqhf;
sound[x] = sqrt(abs( (gg1*(E[x] - psqhf/rho[x]) + (4.0 - gg1)*bsqhf)/rho[x] ));
}
}
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n)
{
double Cinv, rhoinv;
KERNEL_PREAMBLE {
Cinv = 1.0/Cfreeze[x];
rhoinv = 1.0/rho[x];
switch(dir) {
case 1:
rhoW[x] = px[x] * Cinv;
enerW[x] = (px[x] * (E[x] + P[x]) - bx[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (px[x]*px[x]*rhoinv + P[x] - bx[x]*bx[x])*Cinv;
pyW[x] = (px[x]*py[x]*rhoinv - bx[x]*by[x])*Cinv;
pzW[x] = (px[x]*pz[x]*rhoinv - bx[x]*bz[x])*Cinv;
break;
case 2:
rhoW[x] = py[x] * Cinv;
enerW[x] = (py[x] * (E[x] + P[x]) - by[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (py[x]*px[x]*rhoinv - by[x]*bx[x])*Cinv;
pyW[x] = (py[x]*py[x]*rhoinv + P[x] - by[x]*by[x])*Cinv;
pzW[x] = (py[x]*pz[x]*rhoinv - by[x]*bz[x])*Cinv;
break;
case 3:
rhoW[x] = pz[x] * Cinv;
enerW[x] = (pz[x] * (E[x] + P[x]) - bz[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (pz[x]*px[x]*rhoinv - bz[x]*bx[x])*Cinv;
pyW[x] = (pz[x]*py[x]*rhoinv - bz[x]*by[x])*Cinv;
pzW[x] = (pz[x]*pz[x]*rhoinv + P[x] - bz[x]*bz[x])*Cinv;
break;
}
}
/*mass.wArray = mom(X).array ./ freezeSpd.array;
%--- ENERGY DENSITY ---%
ener.wArray = velocity .* (ener.array + press) - mag(X).cellMag.array .* ...
( mag(1).cellMag.array .* mom(1).array ...
+ mag(2).cellMag.array .* mom(2).array ...
+ mag(3).cellMag.array .* mom(3).array) ./ mass.array;
ener.wArray = ener.wArray ./ freezeSpd.array;
%--- MOMENTUM DENSITY ---%
for i=1:3
mom(i).wArray = (velocity .* mom(i).array + press*dirVec(i)...
- mag(X).cellMag.array .* mag(i).cellMag.array) ./ freezeSpd.array;
end*/
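// The switch above is the CUDA counterpart of this MATLAB reference: for flux
// direction X it computes rhoW = p_X / c_f,
// enerW = (p_X*(E + P) - B_X*(p.B)) / (rho*c_f), and for each momentum
// component i, piW = (p_X*p_i/rho - B_X*B_i + P*(i == X)) / c_f, where c_f is
// the freeze speed.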
}
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n)
{
KERNEL_PREAMBLE
out[x] = array0[x] - lambda*Cfreeze[x]*(fluxRa[x] - fluxRb[x] + fluxLa[x] - fluxLb[x]);
//v(i).store.array = v(i).array - 0.5*fluxFactor .* tempFreeze .* ...
// ( v(i).store.fluxR.array - v(i).store.fluxR.shift(X,-1) ...
// + v(i).store.fluxL.array - v(i).store.fluxL.shift(X,1) );
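// The lambda argument therefore appears to absorb the 0.5*fluxFactor term
// from the MATLAB reference above, with Cfreeze supplying tempFreeze.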
}
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n)
{
KERNEL_PREAMBLE {
left[x] = .5*(arr[x] - wArr[x]);
right[x] = .5*(arr[x] + wArr[x]);
}
}
| ea5ab5b28cc8f203452849e867c91a9524f3b7a8.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "GPUmat.hh"
// static parameters
static int init = 0;
static GPUmat *gm;
#include "cudaCommon.h"
//double **getGPUSourcePointers(const mxArray *prhs[], int num, int *retNumel);
//double **makeGPUDestinationArrays(GPUtype src, mxArray *retArray[], int howmany);
//double *makeDestinationArray(GPUtype src, mxArray *retArray[]);
#define OP_SOUNDSPEED 1
#define OP_GASPRESSURE 2
#define OP_TOTALPRESSURE 3
#define OP_MAGPRESSURE 4
#define OP_TOTALANDSND 5
#define OP_WARRAYS 6
#define OP_RELAXINGFLUX 7
#define OP_SEPERATELRFLUX 8
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n);
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n);
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n);
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n);
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n);
#define BLOCKWIDTH 256
#define THREADLOOPS 1
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
if (init == 0) {
// Initialize function
// mexLock();
// load GPUmat
gm = gmGetGPUmat();
init = 1;
}
// Determine appropriate number of arguments for RHS
if (nrhs < 2) mexErrMsgTxt("Require at least (computation type, input argument)");
int operation = (int)*mxGetPr(prhs[0]);
dim3 blocksize; blocksize.x = BLOCKWIDTH; blocksize.y = blocksize.z = 1;
int numel; dim3 gridsize;
// Select the appropriate kernel to invoke
if((operation == OP_SOUNDSPEED) || (operation == OP_GASPRESSURE) || (operation == OP_TOTALPRESSURE)) {
if( (nlhs != 1) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is Cs = cudaMHDKernels(1, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
int arrdim[3];
double **srcs = getGPUSourcePointers(prhs, arrdim, 1, 8);
numel = arrdim[0]*arrdim[1]*arrdim[2];
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
//printf("%i %i %i %i %i %i\n", blocksize.x, blocksize.y, blocksize.z, gridsize.x, gridsize.y, gridsize.z);
switch(operation) {
case OP_SOUNDSPEED: cukern_Soundspeed<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
case OP_GASPRESSURE: cukern_GasPressure<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
case OP_TOTALPRESSURE: cukern_TotalPressure<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], gam, numel); break;
}
free(destPtr);
} else if((operation == OP_MAGPRESSURE)) {
if( (nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Magnetic pressure operator is Pm = cudaMHDKernels(4, bx, by, bz)"); }
double **srcs = getGPUSourcePointers(prhs, 3, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
cukern_MagneticPressure<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], destPtr[0], numel);
free(destPtr); free(srcs);
} else if((operation == OP_TOTALANDSND)) {
if( (nlhs != 2) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is [Ptot Cs] = cudaMHDKernels(5, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
double **srcs = getGPUSourcePointers(prhs, 8, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 2, gm);
cukern_TotalAndSound<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], destPtr[0], destPtr[1], gam, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_WARRAYS)) {
if( (nlhs != 5) || (nrhs != 12)) { mexErrMsgTxt("solving W operator is [rhoW enerW pxW pyW pzW] = cudaMHDKernels(6, rho, E, px, py, pz, bx, by, bz, P, cFreeze, direction)"); }
int dir = (int)*mxGetPr(prhs[11]);
double **srcs = getGPUSourcePointers(prhs, 10, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 5, gm);
cukern_CalcWArrays<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], srcs[8], srcs[9], destPtr[0], destPtr[1], destPtr[2], destPtr[3], destPtr[4], dir, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_RELAXINGFLUX)) {
if( (nlhs != 1) || (nrhs != 8)) { mexErrMsgTxt("relaxing flux operator is fluxed = cudaMHDKernels(7, old, tempfreeze, right, right_shifted, left, left_shifted, lambda)"); }
double lambda = *mxGetPr(prhs[7]);
double **srcs = getGPUSourcePointers(prhs, 6, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 1, gm);
cukern_PerformFlux<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], destPtr[0], lambda, numel);
free(destPtr); free(srcs);
} else if ((operation == OP_SEPERATELRFLUX)) {
if ((nlhs != 2) || (nrhs != 3)) { mexErrMsgTxt("flux seperation operator is [Fl Fr] = cudaMHDKernels(8, array, wArray)"); }
double **srcs = getGPUSourcePointers(prhs, 2, &numel, 1, gm);
gridsize.x = numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double **destPtr = makeGPUDestinationArrays(gm->gputype.getGPUtype(prhs[1]), plhs, 2, gm);
cukern_SeperateLRFlux<<<gridsize, blocksize>>>(srcs[0], srcs[1], destPtr[0], destPtr[1], numel);
free(destPtr); free(srcs);
}
}
//#define KERNEL_PREAMBLE int x = THREADLOOPS*(threadIdx.x + blockDim.x*blockIdx.x); if (x >= n) {return;} int imax; ((x+THREADLOOPS) > n) ? imax = n : imax = x + THREADLOOPS; for(; x < imax; x++)
#define KERNEL_PREAMBLE int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= n) { return; }
// THIS KERNEL CALCULATES SOUNDSPEED
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double gg1 = gam*(gam-1.0);
KERNEL_PREAMBLE
dout[x] = sqrt(abs( (gg1*(E[x] - .5*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x])/rho[x]) + (2.0 -.5*gg1)*(bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x]))/rho[x] ));
}
// THIS KERNEL CALCULATES GAS PRESSURE
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
KERNEL_PREAMBLE
dout[x] = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x] + bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]));
}
// THIS KERNEL CALCULATES TOTAL PRESSURE
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
KERNEL_PREAMBLE
dout[x] = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x])) + .5*(2.0-gam)*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
// THIS KERNEL CALCULATES MAGNETIC PRESSURE
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n)
{
KERNEL_PREAMBLE
dout[x] = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n)
{
double gg1 = gam*(gam-1.0);
double psqhf, bsqhf;
KERNEL_PREAMBLE {
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
bsqhf = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
total[x] = (gam-1.0)*(E[x] - psqhf/rho[x]) + (2.0-gam)*bsqhf;
sound[x] = sqrt(abs( (gg1*(E[x] - psqhf/rho[x]) + (4.0 - gg1)*bsqhf)/rho[x] ));
}
}
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n)
{
double Cinv, rhoinv;
KERNEL_PREAMBLE {
Cinv = 1.0/Cfreeze[x];
rhoinv = 1.0/rho[x];
switch(dir) {
case 1:
rhoW[x] = px[x] * Cinv;
enerW[x] = (px[x] * (E[x] + P[x]) - bx[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (px[x]*px[x]*rhoinv + P[x] - bx[x]*bx[x])*Cinv;
pyW[x] = (px[x]*py[x]*rhoinv - bx[x]*by[x])*Cinv;
pzW[x] = (px[x]*pz[x]*rhoinv - bx[x]*bz[x])*Cinv;
break;
case 2:
rhoW[x] = py[x] * Cinv;
enerW[x] = (py[x] * (E[x] + P[x]) - by[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (py[x]*px[x]*rhoinv - by[x]*bx[x])*Cinv;
pyW[x] = (py[x]*py[x]*rhoinv + P[x] - by[x]*by[x])*Cinv;
pzW[x] = (py[x]*pz[x]*rhoinv - by[x]*bz[x])*Cinv;
break;
case 3:
rhoW[x] = pz[x] * Cinv;
enerW[x] = (pz[x] * (E[x] + P[x]) - bz[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (pz[x]*px[x]*rhoinv - bz[x]*bx[x])*Cinv;
pyW[x] = (pz[x]*py[x]*rhoinv - bz[x]*by[x])*Cinv;
pzW[x] = (pz[x]*pz[x]*rhoinv + P[x] - bz[x]*bz[x])*Cinv;
break;
}
}
/*mass.wArray = mom(X).array ./ freezeSpd.array;
%--- ENERGY DENSITY ---%
ener.wArray = velocity .* (ener.array + press) - mag(X).cellMag.array .* ...
( mag(1).cellMag.array .* mom(1).array ...
+ mag(2).cellMag.array .* mom(2).array ...
+ mag(3).cellMag.array .* mom(3).array) ./ mass.array;
ener.wArray = ener.wArray ./ freezeSpd.array;
%--- MOMENTUM DENSITY ---%
for i=1:3
mom(i).wArray = (velocity .* mom(i).array + press*dirVec(i)...
- mag(X).cellMag.array .* mag(i).cellMag.array) ./ freezeSpd.array;
end*/
}
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n)
{
KERNEL_PREAMBLE
out[x] = array0[x] - lambda*Cfreeze[x]*(fluxRa[x] - fluxRb[x] + fluxLa[x] - fluxLb[x]);
//v(i).store.array = v(i).array - 0.5*fluxFactor .* tempFreeze .* ...
// ( v(i).store.fluxR.array - v(i).store.fluxR.shift(X,-1) ...
// + v(i).store.fluxL.array - v(i).store.fluxL.shift(X,1) );
}
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n)
{
KERNEL_PREAMBLE {
left[x] = .5*(arr[x] - wArr[x]);
right[x] = .5*(arr[x] + wArr[x]);
}
}
|
decoderPlugin.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include "decoderPlugin.h"
#define CHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cout << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
using namespace nvinfer1;
using nvinfer1::plugin::RNNTDecoderPlugin;
using nvinfer1::plugin::RNNTDecoderPluginCreator;
REGISTER_TENSORRT_PLUGIN(RNNTDecoderPluginCreator);
RNNTDecoderPlugin::RNNTDecoderPlugin(const PluginFieldCollection *fc) {
int idx = 0;
mNumLayers = *(int*)(fc->fields[idx].data);
idx++;
mHiddenSize = *(int*)(fc->fields[idx].data);
idx++;
mInputSize = *(int*)(fc->fields[idx].data);
idx++;
mDataType = *(nvinfer1::DataType*)(fc->fields[idx].data);
idx++;
mWeights_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
mWeights_h[i] = (void*)fc->fields[idx].data;
idx++;
}
mBias_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
mBias_h[i] = (void*)fc->fields[idx].data;
idx++;
}
}
RNNTDecoderPlugin::RNNTDecoderPlugin(const void* data, size_t length) {
const char *d = static_cast<const char*>(data);
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
auto d_start = d;
#endif
read<int>(d, mNumLayers);
read<int>(d, mHiddenSize);
read<int>(d, mInputSize);
read<nvinfer1::DataType>(d, mDataType);
mWeights_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
dataTypeSize = sizeof(half);
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
mWeights_h[i] = malloc(sz);
memcpy(mWeights_h[i], d, sz);
d += sz;
}
mBias_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
dataTypeSize = sizeof(half);
size_t sz = 8 * mHiddenSize * dataTypeSize;
mBias_h[i] = malloc(sz);
memcpy(mBias_h[i], d, sz);
d += sz;
}
assert(d == d_start + length);
}
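// Serialized layout consumed by the constructor above (and produced by
// serialize() below): int numLayers | int hiddenSize | int inputSize |
// DataType | per-layer weights of 4*hiddenSize*(inSize + hiddenSize) halves,
// where inSize is inputSize for layer 0 and hiddenSize otherwise | per-layer
// biases of 8*hiddenSize halves. The 4x/8x factors are consistent with four
// LSTM gates carrying separate input and recurrent weights and biases, though
// that reading is inferred from the sizes alone.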
const char* RNNTDecoderPlugin::getPluginType() const {
return "RNNTDecoderPlugin";
}
const char* RNNTDecoderPlugin::getPluginVersion() const {
return "1";
}
void RNNTDecoderPlugin::setPluginNamespace(const char* libNamespace) {
mNamespace = libNamespace;
}
const char* RNNTDecoderPlugin::getPluginNamespace() const {
return mNamespace.c_str();
}
void RNNTDecoderPlugin::destroy() {
if (mWeights_h) {
free(mWeights_h);
mWeights_h = nullptr;
}
if (mBias_h) {
free(mBias_h);
mBias_h = nullptr;
}
delete this;
}
void RNNTDecoderPlugin::setCUDAInfo(hipStream_t mStreamh, hipblasHandle_t mCublas, void **mWeights_d, void **mBias_d, void *mWorkSpace_d) {
this->mStreamh = mStreamh;
this->mCublas = mCublas;
this->mWeights_d = mWeights_d;
this->mBias_d = mBias_d;
this->mWorkSpace_d = mWorkSpace_d;
}
IPluginV2DynamicExt * RNNTDecoderPlugin::clone() const {
size_t sz = getSerializationSize();
char *buff = (char*)malloc(getSerializationSize());
serialize(buff);
RNNTDecoderPlugin* ret = new RNNTDecoderPlugin(buff, sz);
ret->setCUDAInfo(mStreamh, mCublas, mWeights_d, mBias_d, mWorkSpace_d);
free(buff);
return ret;
}
int RNNTDecoderPlugin::getNbOutputs() const {
return 3;
}
// TODO: No idea if this needs batch size. Actually, don't know what's expected at all.
/* Dims RNNTDecoderPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) {
assert(index >= 0 && index < this->getNbOutputs());
if (index == 0) {
return Dims3(inputs[0].d[0], mNumLayers, mHiddenSize);
}
else {
return Dims3(inputs[0].d[0], 1, mHiddenSize);
}
} */
DimsExprs RNNTDecoderPlugin::getOutputDimensions (int outputIndex, const DimsExprs *inputs, int nbInputs, IExprBuilder &exprBuilder) {
assert(outputIndex >= 0 && outputIndex < this->getNbOutputs());
return inputs[outputIndex];
}
bool RNNTDecoderPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) {
if (inOut[pos].format != TensorFormat::kNCHW)
return false;
// fp16 I/O
if (mDataType == nvinfer1::DataType::kHALF) {
bool allHalf = true;
// Don't care about pos. If all are half pass it.
// The way this is called doesn't fill all of inOut, it only fills it up to pos.
for (int i = 0; i <= pos; i++) {
if (inOut[i].type != DataType::kHALF) {
allHalf = false;
}
}
if (allHalf) {
return true;
}
return false;
}
return false;
}
/* void RNNTDecoderPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) {
mInputSize = in[0].dims.d[in[0].dims.nbDims - 1];
} */
void RNNTDecoderPlugin::configurePlugin (const DynamicPluginTensorDesc *in, int nbInputs, const DynamicPluginTensorDesc *out, int nbOutputs) {
// mInputSize = in[0].desc.dims.d[in[0].desc.dims.nbDims - 1];
}
// void RNNTDecoderPlugin::configurePlugin(const Dims *inputDims, int nbInputs, const Dims *outputDims, int nbOutputs, const DataType *inputTypes, const DataType *outputTypes, const bool *inputIsBroadcast, const bool *outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) {
// mInputSize = inputDims[0].d[inputDims[0].nbDims - 1];
// }
int RNNTDecoderPlugin::initialize() {
if (!mInitialized) {
CHECK(hipblasCreate(&mCublas));
CHECK(cublasSetMathMode(mCublas, CUBLAS_TENSOR_OP_MATH));
CHECK(hipStreamCreate(&mStreamh));
mWeights_d = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
CHECK(hipMalloc(&mWeights_d[i], sz));
CHECK(hipMemcpy(mWeights_d[i], mWeights_h[i], sz, hipMemcpyHostToDevice));
}
mBias_d = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 8 * mHiddenSize * dataTypeSize;
CHECK(hipMalloc(&mBias_d[i], sz));
CHECK(hipMemcpy(mBias_d[i], mBias_h[i], sz, hipMemcpyHostToDevice));
}
mWorkSpace_d = NULL;// CHECK(hipMalloc(&mWorkSpace_d, getWorkspaceSize()));
}
return hipSuccess;
}
void RNNTDecoderPlugin::terminate() {
if (mCublas) {
CHECK(hipblasDestroy(mCublas));
mCublas = nullptr;
}
if (mStreamh) {
CHECK(hipStreamDestroy(mStreamh));
mStreamh = nullptr;
}
if (mWeights_d) {
for (int i = 0; i < mNumLayers; i++) {
if (mWeights_d[i]) {
hipFree(mWeights_d[i]);
mWeights_d[i] = nullptr;
}
}
free(mWeights_d);
mWeights_d = nullptr;
}
if (mBias_d) {
for (int i = 0; i < mNumLayers; i++) {
if (mBias_d[i]) {
hipFree(mBias_d[i]);
mBias_d[i] = nullptr;
}
}
free(mBias_d);
mBias_d = nullptr;
}
if (mWorkSpace_d) {
hipFree(mWorkSpace_d);
mWorkSpace_d = nullptr;
}
}
/* size_t RNNTDecoderPlugin::getWorkspaceSize(int maxBatchSize) const {
size_t size = 0;
// tmp_io
size += mNumLayers * mInputSize * maxBatchSize * sizeof(half);
// tmp_i
size += mHiddenSize * maxBatchSize * 4 * sizeof(half);
// tmp_h
size += mNumLayers * mHiddenSize * maxBatchSize * 4 * sizeof(half);
return size;
} */
size_t RNNTDecoderPlugin::getWorkspaceSize(const PluginTensorDesc *inputs, int nbInputs, const PluginTensorDesc *outputs, int nbOutputs) const {
size_t size = 0;
int batchSize = inputs[0].dims.d[0];
// printf("getWorkspaceSize batchSize %d\n", batchSize);
// tmp_io
size += mNumLayers * mHiddenSize * batchSize * sizeof(half);
// tmp_i
size += mHiddenSize * batchSize * 4 * sizeof(half);
// tmp_h
size += mNumLayers * mHiddenSize * batchSize * 4 * sizeof(half);
return size;
}
// int RNNTDecoderPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) {
int RNNTDecoderPlugin::enqueue(const PluginTensorDesc *inputDesc, const PluginTensorDesc *outputDesc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) {
int batchSize = inputDesc[0].dims.d[0];
int effectiveBatch = batchSize;
void *tmp_io = NULL;
void *tmp_i = NULL;
void *tmp_h = NULL;
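// Carve the single workspace allocation exactly as getWorkspaceSize() laid it
// out: tmp_io gets numLayers*hiddenSize*batch halves, tmp_i the next
// 4*hiddenSize*batch halves, and tmp_h the remaining
// 4*numLayers*hiddenSize*batch halves.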
tmp_io = workspace;
tmp_i = (void*)((char*)(tmp_io) + mNumLayers * mHiddenSize * effectiveBatch * sizeof(half));
tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * effectiveBatch * 4 * sizeof(half));
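// The short-lived event below makes the plugin's side stream (mStreamh) wait
// for all work previously enqueued on the TensorRT stream before decoderStep,
// which is handed both streams, starts issuing work.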
hipEvent_t event;
CHECK(hipEventCreateWithFlags(&event, hipEventDisableTiming));
CHECK(hipEventRecord(event, stream));
CHECK(hipStreamWaitEvent(mStreamh, event, 0));
CHECK(hipEventDestroy(event));
if (mDataType == nvinfer1::DataType::kHALF) {
decoderStep<half, HIP_R_16F, half, HIP_R_16F, half>
(mHiddenSize,
mInputSize,
effectiveBatch,
1,
mNumLayers,
this->mCublas,
(half*)inputs[0], // x
(half*)inputs[1], // hx,
(half*)inputs[2], // cx,
(half**)mWeights_d,
(half**)mBias_d, // bias
(half*)outputs[0], // y,
(half*)outputs[1], // hy,
(half*)outputs[2], // cy,
(half*)tmp_io,
(half*)tmp_i,
(half*)tmp_h,
stream,
mStreamh);
}
return 0;
}
size_t RNNTDecoderPlugin::getSerializationSize() const {
size_t sz = sizeof(mNumLayers) + sizeof(mHiddenSize) + sizeof(mInputSize) + sizeof(mDataType);
// Weights
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
sz += 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
}
// Bias
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
sz += 8 * mHiddenSize * dataTypeSize;
}
return sz;
}
void RNNTDecoderPlugin::serialize(void* buffer) const {
char *d = static_cast<char*>(buffer);
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
auto d_start = d;
#endif
write<int>(d, mNumLayers);
write<int>(d, mHiddenSize);
write<int>(d, mInputSize);
write<nvinfer1::DataType>(d, mDataType);
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
memcpy(d, mWeights_h[i], sz);
d += sz;
}
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 8 * mHiddenSize * dataTypeSize;
memcpy(d, mBias_h[i], sz);
d += sz;
}
assert(d == d_start + getSerializationSize());
}
nvinfer1::DataType RNNTDecoderPlugin::getOutputDataType (int index, const nvinfer1::DataType *inputTypes, int nbInputs) const {
return mDataType;
}
// bool RNNTDecoderPlugin::isOutputBroadcastAcrossBatch (int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const {
// return false;
// }
// bool RNNTDecoderPlugin::canBroadcastInputAcrossBatch (int inputIndex) const {
// return inputIndex >= 2 * mNumLayers + 2;
// }
template <typename T>
void RNNTDecoderPlugin::write(char*& buffer, const T& val) const
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template <typename T>
void RNNTDecoderPlugin::read(const char*& buffer, T& val) const
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
const char* RNNTDecoderPluginCreator::getPluginName() const {
return "RNNTDecoderPlugin";
}
const char* RNNTDecoderPluginCreator::getPluginVersion() const {
return "1";
}
const PluginFieldCollection* RNNTDecoderPluginCreator::getFieldNames() {
return nullptr;
}
void RNNTDecoderPluginCreator::setPluginNamespace(const char* libNamespace) {
mNamespace = libNamespace;
}
const char* RNNTDecoderPluginCreator::getPluginNamespace() const {
return mNamespace.c_str();
}
IPluginV2DynamicExt * RNNTDecoderPluginCreator::createPlugin(const char *name, const PluginFieldCollection *fc) {
return new RNNTDecoderPlugin(fc);
}
IPluginV2DynamicExt * RNNTDecoderPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) {
return new RNNTDecoderPlugin(serialData, serialLength);
}
| decoderPlugin.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include "decoderPlugin.h"
#define CHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cout << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
using namespace nvinfer1;
using nvinfer1::plugin::RNNTDecoderPlugin;
using nvinfer1::plugin::RNNTDecoderPluginCreator;
REGISTER_TENSORRT_PLUGIN(RNNTDecoderPluginCreator);
RNNTDecoderPlugin::RNNTDecoderPlugin(const PluginFieldCollection *fc) {
int idx = 0;
mNumLayers = *(int*)(fc->fields[idx].data);
idx++;
mHiddenSize = *(int*)(fc->fields[idx].data);
idx++;
mInputSize = *(int*)(fc->fields[idx].data);
idx++;
mDataType = *(nvinfer1::DataType*)(fc->fields[idx].data);
idx++;
mWeights_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
mWeights_h[i] = (void*)fc->fields[idx].data;
idx++;
}
mBias_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
mBias_h[i] = (void*)fc->fields[idx].data;
idx++;
}
}
RNNTDecoderPlugin::RNNTDecoderPlugin(const void* data, size_t length) {
const char *d = static_cast<const char*>(data);
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
auto d_start = d;
#endif
read<int>(d, mNumLayers);
read<int>(d, mHiddenSize);
read<int>(d, mInputSize);
read<nvinfer1::DataType>(d, mDataType);
mWeights_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
dataTypeSize = sizeof(half);
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
mWeights_h[i] = malloc(sz);
memcpy(mWeights_h[i], d, sz);
d += sz;
}
mBias_h = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
dataTypeSize = sizeof(half);
size_t sz = 8 * mHiddenSize * dataTypeSize;
mBias_h[i] = malloc(sz);
memcpy(mBias_h[i], d, sz);
d += sz;
}
assert(d == d_start + length);
}
const char* RNNTDecoderPlugin::getPluginType() const {
return "RNNTDecoderPlugin";
}
const char* RNNTDecoderPlugin::getPluginVersion() const {
return "1";
}
void RNNTDecoderPlugin::setPluginNamespace(const char* libNamespace) {
mNamespace = libNamespace;
}
const char* RNNTDecoderPlugin::getPluginNamespace() const {
return mNamespace.c_str();
}
void RNNTDecoderPlugin::destroy() {
if (mWeights_h) {
free(mWeights_h);
mWeights_h = nullptr;
}
if (mBias_h) {
free(mBias_h);
mBias_h = nullptr;
}
delete this;
}
void RNNTDecoderPlugin::setCUDAInfo(cudaStream_t mStreamh, cublasHandle_t mCublas, void **mWeights_d, void **mBias_d, void *mWorkSpace_d) {
this->mStreamh = mStreamh;
this->mCublas = mCublas;
this->mWeights_d = mWeights_d;
this->mBias_d = mBias_d;
this->mWorkSpace_d = mWorkSpace_d;
}
IPluginV2DynamicExt * RNNTDecoderPlugin::clone() const {
size_t sz = getSerializationSize();
char *buff = (char*)malloc(getSerializationSize());
serialize(buff);
RNNTDecoderPlugin* ret = new RNNTDecoderPlugin(buff, sz);
ret->setCUDAInfo(mStreamh, mCublas, mWeights_d, mBias_d, mWorkSpace_d);
free(buff);
return ret;
}
int RNNTDecoderPlugin::getNbOutputs() const {
return 3;
}
// TODO: No idea if this needs batch size. Actually, don't know what's expected at all.
/* Dims RNNTDecoderPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) {
assert(index >= 0 && index < this->getNbOutputs());
if (index == 0) {
return Dims3(inputs[0].d[0], mNumLayers, mHiddenSize);
}
else {
return Dims3(inputs[0].d[0], 1, mHiddenSize);
}
} */
DimsExprs RNNTDecoderPlugin::getOutputDimensions (int outputIndex, const DimsExprs *inputs, int nbInputs, IExprBuilder &exprBuilder) {
assert(outputIndex >= 0 && outputIndex < this->getNbOutputs());
return inputs[outputIndex];
}
bool RNNTDecoderPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) {
if (inOut[pos].format != TensorFormat::kNCHW)
return false;
// fp16 I/O
if (mDataType == nvinfer1::DataType::kHALF) {
bool allHalf = true;
// Don't care about pos. If all are half pass it.
// The way this is called doesn't fill all of inOut, it only fills it up to pos.
for (int i = 0; i <= pos; i++) {
if (inOut[i].type != DataType::kHALF) {
allHalf = false;
}
}
if (allHalf) {
return true;
}
return false;
}
return false;
}
/* void RNNTDecoderPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) {
mInputSize = in[0].dims.d[in[0].dims.nbDims - 1];
} */
void RNNTDecoderPlugin::configurePlugin (const DynamicPluginTensorDesc *in, int nbInputs, const DynamicPluginTensorDesc *out, int nbOutputs) {
// mInputSize = in[0].desc.dims.d[in[0].desc.dims.nbDims - 1];
}
// void RNNTDecoderPlugin::configurePlugin(const Dims *inputDims, int nbInputs, const Dims *outputDims, int nbOutputs, const DataType *inputTypes, const DataType *outputTypes, const bool *inputIsBroadcast, const bool *outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) {
// mInputSize = inputDims[0].d[inputDims[0].nbDims - 1];
// }
int RNNTDecoderPlugin::initialize() {
if (!mInitialized) {
CHECK(cublasCreate(&mCublas));
CHECK(cublasSetMathMode(mCublas, CUBLAS_TENSOR_OP_MATH));
CHECK(cudaStreamCreate(&mStreamh));
mWeights_d = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
CHECK(cudaMalloc(&mWeights_d[i], sz));
CHECK(cudaMemcpy(mWeights_d[i], mWeights_h[i], sz, cudaMemcpyHostToDevice));
}
mBias_d = (void**)malloc(mNumLayers * sizeof(void*));
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 8 * mHiddenSize * dataTypeSize;
CHECK(cudaMalloc(&mBias_d[i], sz));
CHECK(cudaMemcpy(mBias_d[i], mBias_h[i], sz, cudaMemcpyHostToDevice));
}
mWorkSpace_d = NULL;// CHECK(cudaMalloc(&mWorkSpace_d, getWorkspaceSize()));
}
return cudaSuccess;
}
void RNNTDecoderPlugin::terminate() {
if (mCublas) {
CHECK(cublasDestroy(mCublas));
mCublas = nullptr;
}
if (mStreamh) {
CHECK(cudaStreamDestroy(mStreamh));
mStreamh = nullptr;
}
if (mWeights_d) {
for (int i = 0; i < mNumLayers; i++) {
if (mWeights_d[i]) {
cudaFree(mWeights_d[i]);
mWeights_d[i] = nullptr;
}
}
free(mWeights_d);
mWeights_d = nullptr;
}
if (mBias_d) {
for (int i = 0; i < mNumLayers; i++) {
if (mBias_d[i]) {
cudaFree(mBias_d[i]);
mBias_d[i] = nullptr;
}
}
free(mBias_d);
mBias_d = nullptr;
}
if (mWorkSpace_d) {
cudaFree(mWorkSpace_d);
mWorkSpace_d = nullptr;
}
}
/* size_t RNNTDecoderPlugin::getWorkspaceSize(int maxBatchSize) const {
size_t size = 0;
// tmp_io
size += mNumLayers * mInputSize * maxBatchSize * sizeof(half);
// tmp_i
size += mHiddenSize * maxBatchSize * 4 * sizeof(half);
// tmp_h
size += mNumLayers * mHiddenSize * maxBatchSize * 4 * sizeof(half);
return size;
} */
size_t RNNTDecoderPlugin::getWorkspaceSize(const PluginTensorDesc *inputs, int nbInputs, const PluginTensorDesc *outputs, int nbOutputs) const {
size_t size = 0;
int batchSize = inputs[0].dims.d[0];
// printf("getWorkspaceSize batchSize %d\n", batchSize);
// tmp_io
size += mNumLayers * mHiddenSize * batchSize * sizeof(half);
// tmp_i
size += mHiddenSize * batchSize * 4 * sizeof(half);
// tmp_h
size += mNumLayers * mHiddenSize * batchSize * 4 * sizeof(half);
return size;
}
// int RNNTDecoderPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) {
int RNNTDecoderPlugin::enqueue(const PluginTensorDesc *inputDesc, const PluginTensorDesc *outputDesc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) {
int batchSize = inputDesc[0].dims.d[0];
int effectiveBatch = batchSize;
void *tmp_io = NULL;
void *tmp_i = NULL;
void *tmp_h = NULL;
tmp_io = workspace;
tmp_i = (void*)((char*)(tmp_io) + mNumLayers * mHiddenSize * effectiveBatch * sizeof(half));
tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * effectiveBatch * 4 * sizeof(half));
cudaEvent_t event;
CHECK(cudaEventCreate(&event, cudaEventDisableTiming));
CHECK(cudaEventRecord(event, stream));
CHECK(cudaStreamWaitEvent(mStreamh, event, 0));
CHECK(cudaEventDestroy(event));
if (mDataType == nvinfer1::DataType::kHALF) {
decoderStep<half, CUDA_R_16F, half, CUDA_R_16F, half>
(mHiddenSize,
mInputSize,
effectiveBatch,
1,
mNumLayers,
this->mCublas,
(half*)inputs[0], // x
(half*)inputs[1], // hx,
(half*)inputs[2], // cx,
(half**)mWeights_d,
(half**)mBias_d, // bias
(half*)outputs[0], // y,
(half*)outputs[1], // hy,
(half*)outputs[2], // cy,
(half*)tmp_io,
(half*)tmp_i,
(half*)tmp_h,
stream,
mStreamh);
}
return 0;
}
size_t RNNTDecoderPlugin::getSerializationSize() const {
size_t sz = sizeof(mNumLayers) + sizeof(mHiddenSize) + sizeof(mInputSize) + sizeof(mDataType);
// Weights
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
sz += 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
}
// Bias
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
sz += 8 * mHiddenSize * dataTypeSize;
}
return sz;
}
void RNNTDecoderPlugin::serialize(void* buffer) const {
char *d = static_cast<char*>(buffer);
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
auto d_start = d;
#endif
write<int>(d, mNumLayers);
write<int>(d, mHiddenSize);
write<int>(d, mInputSize);
write<nvinfer1::DataType>(d, mDataType);
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 4 * mHiddenSize * ((i == 0 ? mInputSize : mHiddenSize) + mHiddenSize) * dataTypeSize;
memcpy(d, mWeights_h[i], sz);
d += sz;
}
for (int i = 0; i < mNumLayers; i++) {
size_t dataTypeSize = 0;
if (mDataType == DataType::kHALF) {
dataTypeSize = sizeof(half);
}
size_t sz = 8 * mHiddenSize * dataTypeSize;
memcpy(d, mBias_h[i], sz);
d += sz;
}
assert(d == d_start + getSerializationSize());
}
nvinfer1::DataType RNNTDecoderPlugin::getOutputDataType (int index, const nvinfer1::DataType *inputTypes, int nbInputs) const {
return mDataType;
}
// bool RNNTDecoderPlugin::isOutputBroadcastAcrossBatch (int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const {
// return false;
// }
// bool RNNTDecoderPlugin::canBroadcastInputAcrossBatch (int inputIndex) const {
// return inputIndex >= 2 * mNumLayers + 2;
// }
template <typename T>
void RNNTDecoderPlugin::write(char*& buffer, const T& val) const
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template <typename T>
void RNNTDecoderPlugin::read(const char*& buffer, T& val) const
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
const char* RNNTDecoderPluginCreator::getPluginName() const {
return "RNNTDecoderPlugin";
}
const char* RNNTDecoderPluginCreator::getPluginVersion() const {
return "1";
}
const PluginFieldCollection* RNNTDecoderPluginCreator::getFieldNames() {
return nullptr;
}
void RNNTDecoderPluginCreator::setPluginNamespace(const char* libNamespace) {
mNamespace = libNamespace;
}
const char* RNNTDecoderPluginCreator::getPluginNamespace() const {
return mNamespace.c_str();
}
IPluginV2DynamicExt * RNNTDecoderPluginCreator::createPlugin(const char *name, const PluginFieldCollection *fc) {
return new RNNTDecoderPlugin(fc);
}
IPluginV2DynamicExt * RNNTDecoderPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) {
return new RNNTDecoderPlugin(serialData, serialLength);
}
|
b73044b6530df01f6887bfff35431a36b9a6129d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* *
* Distributed Hash Cracker v3.0 *
* *
* Copyright (c) 2009 RPISEC. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without modifi- *
* cation, are permitted provided that the following conditions are met: *
* *
* * Redistributions of source code must retain the above copyright notice *
* this list of conditions and the following disclaimer. *
* *
* * Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* * Neither the name of RPISEC nor the names of its contributors may be *
* used to endorse or promote products derived from this software without *
* specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY RPISEC "AS IS" AND ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN *
* NO EVENT SHALL RPISEC BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED *
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
* *
******************************************************************************/
/*!
@file md5_kernel.cu
@brief CUDA implementation of MD5
*/
//Left rotate
#define ROTL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
//Core function
#define md5round_core(a,b,c,d,k,s,t,f) a = ROTL(a + f(b,c,d) + k + t, s) + b;
//Round ops
#define Ff(b,c,d) (((b) & (c)) | (~(b) & (d)))
#define Fg(b,c,d) (((b) & (d)) | (~(d) & (c)))
#define Fh(b,c,d) ((b) ^ (c) ^ (d))
#define Fi(b,c,d) ((c) ^ ((b) | ~(d)))
//Rounds
#define md5round_f(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Ff)
#define md5round_g(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fg)
#define md5round_h(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fh)
#define md5round_i(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fi)
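//Illustration only (the real round schedule lives in md5_kernel_core.h): a
//single F-round invocation such as
//  md5round_f(a,b,c,d,0,7,0xd76aa478)
//expands to
//  a = ROTL(a + Ff(b,c,d) + buf0 + 0xd76aa478, 7) + b;
//where buf##i selects one of the packed message words buf0, buf1, ... that the
//InitGuess/AddPadding macros below assign.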
//Textures
texture<int, 1, hipReadModeElementType> texCharset;
//Fake-array macros
#define AddPadding(num) case num: \
buf##num = (buf##num & ~paddmask) | padding; \
break
#define InitGuess(num, a,b,c,d) \
{ \
/* Calculate the four indices */ \
LstartInit(lstart3, a); \
LstartInit(lstart2, b); \
LstartInit(lstart1, c); \
LstartInit(lstart0, d); \
/* Pack four elements into the int (if we exceed length, padding will overwrite the garbage) */ \
buf##num = \
(charset[lstart3] << 24) | \
(charset[lstart2] << 16) | \
(charset[lstart1] << 8) | \
charset[lstart0]; \
}
#define LstartInit(ls, num) \
{ \
/* Get initial value and apply carry-in */ \
ls = carry + start[num]; \
/* Rightmost value? Bump by index */ \
if(num == lm1) \
ls += index; \
/* Carry out */ \
if(ls >= base && num<len) \
{ \
/* Calculate carry */ \
carry = ls / base; \
/* Update this digit */ \
ls %= base; \
} \
else \
carry = 0; \
}
#define SaveOutput(num) case num: \
po[num] = buf##num;
/*!
@brief CUDA implementation of MD5
Thread-per-block requirement: minimum 64
@param gtarget Target value (four ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void md5Kernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value
__shared__ int target[4];
if(threadIdx.x < 4)
target[threadIdx.x] = gtarget[threadIdx.x];
//Wait for all cache filling to finish
__syncthreads();
//Do the core processing
#include "md5_kernel_core.h"
//Check results
if(target[0] == a && target[1] == b && target[2] == c && target[3] == d)
{
*status = 1;
int* po = (int*)output;
switch(len / 4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
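//----------------------------------------------------------------------------
//Host-side launch sketch (illustrative only, not part of the cracker proper).
//It assumes the caller has already bound the charset to texCharset and filled
//the device buffers named below; the 256-thread block size is an assumption
//that satisfies the kernel's "minimum 64 threads per block" requirement, and
//guessesPerKernel is a hypothetical name for blockDim.x * gridDim.x.
//----------------------------------------------------------------------------
static inline hipError_t launchMd5KernelSketch(
	int* d_target, int* d_start, char* d_status, char* d_output,
	int base, int len, int guessesPerKernel)
{
	const int threadsPerBlock = 256;
	const int blocks = (guessesPerKernel + threadsPerBlock - 1) / threadsPerBlock;
	hipLaunchKernelGGL((md5Kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
		d_target, d_start, (char*)0 /*salt unused*/, d_status, d_output,
		base, len, 0 /*saltlen*/, 1 /*hashcount*/);
	return hipGetLastError();
}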
/*!
@brief CUDA implementation of MD5 with batch processing support
Thread-per-block requirement: minimum 64
@param gtarget Target value (four ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested.
*/
extern "C" __global__ void md5BatchKernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value
__shared__ int target[4 * 128];
if(threadIdx.x < 64)
{
int td = threadIdx.x;
if(td < hashcount)
{
for(int i=0; i<4; i++)
target[4*td + i] = gtarget[4*td + i];
}
		//The same 64 threads also cache the second half of the target list
		//(hashes 64..127), matching the 4 * 128 ints reserved above.
		td += 64;
		if(td < hashcount)
		{
			for(int i=0; i<4; i++)
				target[4*td + i] = gtarget[4*td + i];
		}
}
//Wait for all cache filling to finish
__syncthreads();
//Do the core processing
#include "md5_kernel_core.h"
for(int i=0; i<hashcount; i++)
{
int* xtarget = target + (4*i);
//Check results
if(xtarget[0] == a && xtarget[1] == b && xtarget[2] == c && xtarget[3] == d)
{
status[i] = 1;
int* po = (int*)output + (i*8);
switch(len / 4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
}
| b73044b6530df01f6887bfff35431a36b9a6129d.cu | /******************************************************************************
* *
* Distributed Hash Cracker v3.0 *
* *
* Copyright (c) 2009 RPISEC. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without modifi- *
* cation, are permitted provided that the following conditions are met: *
* *
* * Redistributions of source code must retain the above copyright notice *
* this list of conditions and the following disclaimer. *
* *
* * Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* * Neither the name of RPISEC nor the names of its contributors may be *
* used to endorse or promote products derived from this software without *
* specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY RPISEC "AS IS" AND ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN *
* NO EVENT SHALL RPISEC BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED *
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
* *
******************************************************************************/
/*!
@file md5_kernel.cu
@brief CUDA implementation of MD5
*/
//Left rotate
#define ROTL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
//Core function
#define md5round_core(a,b,c,d,k,s,t,f) a = ROTL(a + f(b,c,d) + k + t, s) + b;
//Round ops
#define Ff(b,c,d) (((b) & (c)) | (~(b) & (d)))
#define Fg(b,c,d) (((b) & (d)) | (~(d) & (c)))
#define Fh(b,c,d) ((b) ^ (c) ^ (d))
#define Fi(b,c,d) ((c) ^ ((b) | ~(d)))
//Rounds
#define md5round_f(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Ff)
#define md5round_g(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fg)
#define md5round_h(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fh)
#define md5round_i(a,b,c,d,i,n,t) md5round_core(a,b,c,d,buf##i,n,t,Fi)
//Textures
texture<int, 1, cudaReadModeElementType> texCharset;
//Fake-array macros
#define AddPadding(num) case num: \
buf##num = (buf##num & ~paddmask) | padding; \
break
#define InitGuess(num, a,b,c,d) \
{ \
/* Calculate the four indices */ \
LstartInit(lstart3, a); \
LstartInit(lstart2, b); \
LstartInit(lstart1, c); \
LstartInit(lstart0, d); \
/* Pack four elements into the int (if we exceed length, padding will overwrite the garbage) */ \
buf##num = \
(charset[lstart3] << 24) | \
(charset[lstart2] << 16) | \
(charset[lstart1] << 8) | \
charset[lstart0]; \
}
#define LstartInit(ls, num) \
{ \
/* Get initial value and apply carry-in */ \
ls = carry + start[num]; \
/* Rightmost value? Bump by index */ \
if(num == lm1) \
ls += index; \
/* Carry out */ \
if(ls >= base && num<len) \
{ \
/* Calculate carry */ \
carry = ls / base; \
/* Update this digit */ \
ls %= base; \
} \
else \
carry = 0; \
}
#define SaveOutput(num) case num: \
po[num] = buf##num;
/*!
@brief CUDA implementation of MD5
Thread-per-block requirement: minimum 64
@param gtarget Target value (four ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void md5Kernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value
__shared__ int target[4];
if(threadIdx.x < 4)
target[threadIdx.x] = gtarget[threadIdx.x];
//Wait for all cache filling to finish
__syncthreads();
//Do the core processing
#include "md5_kernel_core.h"
//Check results
if(target[0] == a && target[1] == b && target[2] == c && target[3] == d)
{
*status = 1;
int* po = (int*)output;
switch(len / 4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
/*!
@brief CUDA implementation of MD5 with batch processing support
Thread-per-block requirement: minimum 64
@param gtarget Target value (four ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested.
*/
extern "C" __global__ void md5BatchKernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value
__shared__ int target[4 * 128];
if(threadIdx.x < 64)
{
int td = threadIdx.x;
if(td < hashcount)
{
for(int i=0; i<4; i++)
target[4*td + i] = gtarget[4*td + i];
}
		//The same 64 threads also cache the second half of the target list
		//(hashes 64..127), matching the 4 * 128 ints reserved above.
		td += 64;
		if(td < hashcount)
		{
			for(int i=0; i<4; i++)
				target[4*td + i] = gtarget[4*td + i];
		}
}
//Wait for all cache filling to finish
__syncthreads();
//Do the core processing
#include "md5_kernel_core.h"
for(int i=0; i<hashcount; i++)
{
int* xtarget = target + (4*i);
//Check results
if(xtarget[0] == a && xtarget[1] == b && xtarget[2] == c && xtarget[3] == d)
{
status[i] = 1;
int* po = (int*)output + (i*8);
switch(len / 4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
}
|
cd14c066f8aa1cbf8fe67df7eae62f6c994607ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//Namespaces.
using namespace std;
using namespace cv;
//Compute the neighbourhood average and return it.
__device__ int cal_intensity(const int* image, int x, int y, int width, int height, int k){
int ic, ir, fc, fr, n;
	//Clamp the k x k window to the image borders (start indices must not go negative).
	x-(k/2)<0 ? ic = 0 : ic = x-(k/2);
	y-(k/2)<0 ? ir = 0 : ir = y-(k/2);
x+(k/2)+1>width ? fc = width : fc = x+(k/2)+1;
y+(k/2)+1>height ? fr = height : fr = y+(k/2)+1;
int red = 0, green = 0, blue = 0;
for(int i=ic; i<fc; i++){
for(int j=ir; j<fr; j++){
n = image[j+i*height];
blue += (n % 1000);
green += (n/1000) % 1000;
red += (n/1000000) % 1000;
}
}
blue = blue / (k*k);
green = green / (k*k);
red = red / (k*k);
return (red*1000000)+(green*1000)+blue;
}
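//Illustration only (not used by the kernels): each pixel is packed into a
//single int as red*1000000 + green*1000 + blue, so (r,g,b) = (12,34,56)
//becomes 12034056. A minimal helper showing the same packing:
__host__ __device__ inline int pack_rgb_example(int red, int green, int blue){
	return (red*1000000)+(green*1000)+blue;
}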
//Per-thread function.
__global__ void blur_thread(const int* d_image, const int width, const int height, const int kernel, const int total_threads, int* d_blur){
int id = blockDim.x * blockIdx.x + threadIdx.x;
int ir = id * ( height / total_threads );
int fr = (id + 1) * ( height / total_threads );
if(id < height){
for(int i=0; i<width; i++){
for(int j=ir; j<fr; j++){
//d_blur[j+i*height] = d_image[j+i*height];;
d_blur[j+i*height] = cal_intensity(d_image, i, j, width, height, kernel);
//d_blur[j*width+i] = cal_intensity(d_image, i, j, width, height, kernel);
//d_blur[j+i*height] = d_image[j+i*height];
//d_blur[j+i*height] = d_image[j+i*height];
}
}
}
}
//Main.
int main(int argc, char** argv){
//Variables.
char* image_name;
Mat image, blur_image;
int kernel_size, num_threads, num_blocks;
	//Read command-line arguments.
image_name = argv[1];
kernel_size = atoi(argv[2]);
num_threads = atoi(argv[3]);
if(argc != 4){
cout<<"Numero incorrecto de argumentos.\n";
return -1;
}
	//Read the image
image = imread(image_name);
if(!image.data){
cout<<"Imagen no reconocida.\n";
return -1;
}
	//Initialize variables
int width = image.cols;
int height = image.rows;
blur_image = image.clone();
hipError_t err = hipSuccess;
//Malloc host
int numElements = width*height;
size_t size = numElements * sizeof(int);
int *h_image = (int *)malloc(size);
int *h_blur = (int *)malloc(size);
	//Pack the image (3 channels) into a vector of ints
int aux = 0;
for(int i=0; i<width; i++){
for(int j=0; j<height; j++){
h_image[aux] = image.at<Vec3b>(j,i)[0];
h_image[aux] += image.at<Vec3b>(j,i)[1] * 1000;
h_image[aux] += image.at<Vec3b>(j,i)[2] * 1000000;
aux++;
}
}
	//Malloc device
	//Image
int *d_image = NULL;
err = hipMalloc((void **)&d_image, size);
if(err != hipSuccess){
cout<<"Error separando espacio imagen normal en GPU "<<hipGetErrorString(err)<<endl;
return -1;
}
	//Clone
int *d_blur = NULL;
err = hipMalloc((void **)&d_blur, size);
if(err != hipSuccess){
cout<<"Error separando espacio imagen difuminada en GPU "<<hipGetErrorString(err)<<endl;
return -1;
}
//MemoryCopy
	//Image
err = hipMemcpy(d_image, h_image, size, hipMemcpyHostToDevice);
if (err != hipSuccess){
cout<<"Error copiando datos a GPU "<<hipGetErrorString(err)<<endl;
return -1;
}
	//Launch GPU kernel
int blocksPerGrid = (height + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( blur_thread), dim3(blocksPerGrid), dim3(num_threads), 0, 0, d_image, width, height, kernel_size, height, d_blur);
err = hipGetLastError();
if (err != hipSuccess){
cout<<"Fallo al lanzar Kerndel de GPU "<<hipGetErrorString(err)<<endl;
return -1;
}
	//Copy from GPU to CPU
cout<<"Copiando datos desde la GPU a CPU."<<endl;
err = hipMemcpy(h_blur, d_blur, size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
cout<<"Error copiando desde GPU a CPU "<<hipGetErrorString(err)<<endl;
return -1;
}
	//Write the blurred image.
aux = 0;
for(int i=0; i<width; i++){
for(int j=0; j<height; j++){
blur_image.at<Vec3b>(j,i)[0] = (unsigned char)((h_blur[aux]) % 1000);
blur_image.at<Vec3b>(j,i)[1] = (unsigned char)((h_blur[aux]/1000) % 1000);
blur_image.at<Vec3b>(j,i)[2] = (unsigned char)((h_blur[aux]/1000000) % 1000);
aux++;
}
}
imwrite("blur_image.jpg", blur_image);
	//Free device memory
err = hipFree(d_image);
if (err != hipSuccess){
cout<<"Error liberando memoria de imagen normal "<<hipGetErrorString(err)<<endl;
return -1;
}
err = hipFree(d_blur);
if (err != hipSuccess){
cout<<"Error liberando memoria de imagen difuminada "<<hipGetErrorString(err)<<endl;
return -1;
}
free(h_image);
free(h_blur);
return 0;
}
| cd14c066f8aa1cbf8fe67df7eae62f6c994607ed.cu | #include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
//Namespaces.
using namespace std;
using namespace cv;
//Compute the neighbourhood average and return it.
__device__ int cal_intensity(const int* image, int x, int y, int width, int height, int k){
int ic, ir, fc, fr, n;
	//Clamp the k x k window to the image borders (start indices must not go negative).
	x-(k/2)<0 ? ic = 0 : ic = x-(k/2);
	y-(k/2)<0 ? ir = 0 : ir = y-(k/2);
x+(k/2)+1>width ? fc = width : fc = x+(k/2)+1;
y+(k/2)+1>height ? fr = height : fr = y+(k/2)+1;
int red = 0, green = 0, blue = 0;
for(int i=ic; i<fc; i++){
for(int j=ir; j<fr; j++){
n = image[j+i*height];
blue += (n % 1000);
green += (n/1000) % 1000;
red += (n/1000000) % 1000;
}
}
blue = blue / (k*k);
green = green / (k*k);
red = red / (k*k);
return (red*1000000)+(green*1000)+blue;
}
//Per-thread function.
__global__ void blur_thread(const int* d_image, const int width, const int height, const int kernel, const int total_threads, int* d_blur){
int id = blockDim.x * blockIdx.x + threadIdx.x;
int ir = id * ( height / total_threads );
int fr = (id + 1) * ( height / total_threads );
if(id < height){
for(int i=0; i<width; i++){
for(int j=ir; j<fr; j++){
//d_blur[j+i*height] = d_image[j+i*height];;
d_blur[j+i*height] = cal_intensity(d_image, i, j, width, height, kernel);
//d_blur[j*width+i] = cal_intensity(d_image, i, j, width, height, kernel);
//d_blur[j+i*height] = d_image[j+i*height];
//d_blur[j+i*height] = d_image[j+i*height];
}
}
}
}
//Main.
int main(int argc, char** argv){
//Variables.
char* image_name;
Mat image, blur_image;
int kernel_size, num_threads, num_blocks;
	//Read command-line arguments.
image_name = argv[1];
kernel_size = atoi(argv[2]);
num_threads = atoi(argv[3]);
if(argc != 4){
cout<<"Numero incorrecto de argumentos.\n";
return -1;
}
	//Read the image
image = imread(image_name);
if(!image.data){
cout<<"Imagen no reconocida.\n";
return -1;
}
	//Initialize variables
int width = image.cols;
int height = image.rows;
blur_image = image.clone();
cudaError_t err = cudaSuccess;
//Malloc host
int numElements = width*height;
size_t size = numElements * sizeof(int);
int *h_image = (int *)malloc(size);
int *h_blur = (int *)malloc(size);
	//Pack the image (3 channels) into a vector of ints
int aux = 0;
for(int i=0; i<width; i++){
for(int j=0; j<height; j++){
h_image[aux] = image.at<Vec3b>(j,i)[0];
h_image[aux] += image.at<Vec3b>(j,i)[1] * 1000;
h_image[aux] += image.at<Vec3b>(j,i)[2] * 1000000;
aux++;
}
}
	//Malloc device
	//Image
int *d_image = NULL;
err = cudaMalloc((void **)&d_image, size);
if(err != cudaSuccess){
cout<<"Error separando espacio imagen normal en GPU "<<cudaGetErrorString(err)<<endl;
return -1;
}
	//Clone
int *d_blur = NULL;
err = cudaMalloc((void **)&d_blur, size);
if(err != cudaSuccess){
cout<<"Error separando espacio imagen difuminada en GPU "<<cudaGetErrorString(err)<<endl;
return -1;
}
//MemoryCopy
	//Image
err = cudaMemcpy(d_image, h_image, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
cout<<"Error copiando datos a GPU "<<cudaGetErrorString(err)<<endl;
return -1;
}
	//Launch GPU kernel
int blocksPerGrid = (height + num_threads - 1) / num_threads;
blur_thread<<<blocksPerGrid, num_threads>>>(d_image, width, height, kernel_size, height, d_blur);
err = cudaGetLastError();
if (err != cudaSuccess){
cout<<"Fallo al lanzar Kerndel de GPU "<<cudaGetErrorString(err)<<endl;
return -1;
}
	//Copy from GPU to CPU
cout<<"Copiando datos desde la GPU a CPU."<<endl;
err = cudaMemcpy(h_blur, d_blur, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
cout<<"Error copiando desde GPU a CPU "<<cudaGetErrorString(err)<<endl;
return -1;
}
	//Write the blurred image.
aux = 0;
for(int i=0; i<width; i++){
for(int j=0; j<height; j++){
blur_image.at<Vec3b>(j,i)[0] = (unsigned char)((h_blur[aux]) % 1000);
blur_image.at<Vec3b>(j,i)[1] = (unsigned char)((h_blur[aux]/1000) % 1000);
blur_image.at<Vec3b>(j,i)[2] = (unsigned char)((h_blur[aux]/1000000) % 1000);
aux++;
}
}
imwrite("blur_image.jpg", blur_image);
	//Free device memory
err = cudaFree(d_image);
if (err != cudaSuccess){
cout<<"Error liberando memoria de imagen normal "<<cudaGetErrorString(err)<<endl;
return -1;
}
err = cudaFree(d_blur);
if (err != cudaSuccess){
cout<<"Error liberando memoria de imagen difuminada "<<cudaGetErrorString(err)<<endl;
return -1;
}
free(h_image);
free(h_blur);
return 0;
}
|
9c6d29b74f67b9488059736baa6a9c337a0b8f65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file test_tiled_tree.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
 * , which is conducted under the supervision of prof. dr hab. inż. Marek Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <cmath>
#include <vector>
// #ifndef RD_DEBUG
// #define NDEBUG // for disabling assert macro
// #endif
#include <assert.h>
#if defined(RD_DEBUG) && !defined(CUB_STDERR)
#define CUB_STDERR
#endif
#include "rd/gpu/device/tiled/tiled_tree.cuh"
#include "rd/gpu/device/tiled/tree_drawer.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
//----------------------------------------------
// global variables / constants
//----------------------------------------------
static constexpr int BLOCK_THREADS = 128;
static constexpr int POINTS_PER_THREAD = 4;
static constexpr int MAX_TEST_DIM = 3;
static constexpr int MAX_POINTS_NUM = int(1e7);
static constexpr int RD_CUDA_MAX_SYNC_DEPTH = 10;
static constexpr size_t HUNDRED_MB_IN_BYTES = 100 * 1024 * 1024;
static int g_devId = 0;
static bool g_verbose = false;
static std::string g_devName = "";
//------------------------------------------------------------------------
static void configureDevice(
size_t neededMemSize)
{
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDevice(g_devId));
checkCudaErrors(hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, RD_CUDA_MAX_SYNC_DEPTH));
checkCudaErrors(hipDeviceSetLimit(hipLimitMallocHeapSize, neededMemSize));
}
//------------------------------------------------------------------------
struct TileProcessOp
{
template <typename NodeT>
__device__ __forceinline__ void operator()(NodeT const * node) const
{
if (threadIdx.x == 0)
{
_CubLog("---- Tile TileProcessOp() node id: %d, pointsCnt: %d, neighboursCnt: %d\n",
node->id, node->pointsCnt, node->neighboursCnt);
}
}
};
//------------------------------------------------------------------------
// Test kernels
//------------------------------------------------------------------------
template <
typename TiledTreeT,
typename TileProcessOpT,
int DIM,
typename T>
__launch_bounds__ (1)
static __global__ void buildTreeKernel(
TiledTreeT * tree,
T const * inputPoints,
int pointsNum,
rd::gpu::BoundingBox<DIM, T> * d_globalBBox,
int maxTileCapacity,
T sphereRadius,
T extensionFactor,
cub::ArrayWrapper<int, DIM> const initTileCntPerDim,
int stride)
{
// last arg: true -> debugSynchronous
// new(tree) TiledTreeT(maxTileCapacity, sphereRadius, extensionFactor, true);
new(tree) TiledTreeT(maxTileCapacity, sphereRadius, extensionFactor, false);
TileProcessOpT tileProcessOp;
hipStream_t buildTreeStream;
rdDevCheckCall(hipStreamCreateWithFlags(&buildTreeStream, hipStreamNonBlocking));
rdDevCheckCall(tree->buildTree(
inputPoints, pointsNum, initTileCntPerDim, d_globalBBox, tileProcessOp, buildTreeStream,
stride));
rdDevCheckCall(hipStreamDestroy(buildTreeStream));
}
template <
typename TiledTreeT>
__launch_bounds__ (1)
static __global__ void deleteTiledTree(
TiledTreeT * tree)
{
tree->~TiledTreeT();
}
//------------------------------------------------------------------------
template <
int DIM,
rd::DataMemoryLayout IN_MEM_LAYOUT,
rd::DataMemoryLayout OUT_MEM_LAYOUT,
typename T>
void testBuildTree(
T const * h_inputPoints,
int pointsNum,
T sphereRadius)
{
typedef rd::gpu::tiled::TiledTreePolicy<
BLOCK_THREADS,
POINTS_PER_THREAD,
cub::LOAD_LDG,
rd::gpu::IO_BACKEND_CUB>
TiledTreePolicyT;
typedef rd::gpu::tiled::TiledTree<
TiledTreePolicyT,
DIM,
IN_MEM_LAYOUT,
OUT_MEM_LAYOUT,
T>
TiledTreeT;
int maxTileCapacity = 0.18 * pointsNum;
T extensionFactor = 1.4;
T * d_inputPoints;
TiledTreeT * d_tree;
cub::ArrayWrapper<int, DIM> initTileCntPerDim;
int inPtsStride = DIM;
if (IN_MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(hipMalloc(&d_inputPoints, pointsNum * DIM * sizeof(T)));
}
else if (IN_MEM_LAYOUT == rd::COL_MAJOR)
{
size_t pitch = 0;
checkCudaErrors(hipMallocPitch(&d_inputPoints, &pitch, pointsNum * sizeof(T),
DIM));
inPtsStride = pitch / sizeof(T);
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(hipMalloc(&d_tree, sizeof(TiledTreeT)));
// Hardcoded for 3 tiles per dimension
for (int k =0; k < DIM; ++k)
{
initTileCntPerDim.array[k] = 3;
}
if (IN_MEM_LAYOUT == rd::ROW_MAJOR)
{
rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_inputPoints, h_inputPoints, DIM, pointsNum, DIM, inPtsStride);
}
else if (IN_MEM_LAYOUT == rd::COL_MAJOR)
{
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_inputPoints, h_inputPoints, DIM, pointsNum, inPtsStride * sizeof(T),
DIM * sizeof(T));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(hipDeviceSynchronize());
rd::gpu::BoundingBox<DIM, T> globalBBox;
checkCudaErrors(globalBBox.template findBounds<IN_MEM_LAYOUT>(
d_inputPoints, pointsNum, inPtsStride));
globalBBox.calcDistances();
// allocate & copy memory for device global bounding box
rd::gpu::BoundingBox<DIM, T> *d_globalBBox;
checkCudaErrors(hipMalloc(&d_globalBBox, sizeof(rd::gpu::BoundingBox<DIM,T>)));
checkCudaErrors(hipMemcpy(d_globalBBox, &globalBBox, sizeof(rd::gpu::BoundingBox<DIM,T>),
hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
std::cout << "Invoking buildTreeKernel" << std::endl;
hipLaunchKernelGGL(( buildTreeKernel<TiledTreeT, TileProcessOp, DIM/*, IN_MEM_LAYOUT*/>), dim3(1),dim3(1), 0, 0,
d_tree, d_inputPoints, pointsNum, d_globalBBox, maxTileCapacity, sphereRadius,
extensionFactor, initTileCntPerDim, inPtsStride);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// draw graphs
rd::gpu::tiled::util::TreeDrawer<DIM, IN_MEM_LAYOUT, OUT_MEM_LAYOUT, TiledTreeT, T> treeDrawer(
d_tree, d_inputPoints, pointsNum, inPtsStride);
treeDrawer.drawBounds();
treeDrawer.drawEachTile();
std::cout << "Invoking deleteTiledTree kernel" << std::endl;
hipLaunchKernelGGL(( deleteTiledTree<TiledTreeT>), dim3(1), dim3(1), 0, 0, d_tree);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//-------------------------------------------------------------------------------
// clean-up
//-------------------------------------------------------------------------------
checkCudaErrors(hipFree(d_tree));
checkCudaErrors(hipFree(d_inputPoints));
checkCudaErrors(hipFree(d_globalBBox));
}
template <
int DIM,
typename T>
void testMemLayout(
int pointNum,
PointCloud<T> const & pc)
{
std::vector<T> && points = pc.extractPart(pointNum, DIM);
T sphereRadius = 1.45f * pc.stddev_;
if (g_verbose && DIM <= 3)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << DIM << "D_" << g_devName;
os << "_initial_samples_set";
gDrawer.showPoints(os.str(), points.data(), pointNum, DIM);
}
std::cout << rd::HLINE << std::endl;
std::cout << "<<<<< ROW_MAJOR >>>>>" << std::endl;
testBuildTree<DIM, rd::ROW_MAJOR, rd::ROW_MAJOR>(points.data(), pointNum, sphereRadius);
std::cout << rd::HLINE << std::endl;
std::cout << "<<<<< COL_MAJOR >>>>>" << std::endl;
testBuildTree<DIM, rd::COL_MAJOR, rd::COL_MAJOR>(points.data(), pointNum, sphereRadius);
}
/**
* @brief helper structure for static for loop over dimension
*/
struct IterateDimensions
{
template <typename D, typename T>
static void impl(
D idx,
int pointNum,
PointCloud<T> const & pc)
{
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << idx << "D\n";
testMemLayout<D::value>(pointNum, pc);
}
};
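/**
 * A sketch only: the project's real StaticFor used below (declared in one of
 * the included headers) may differ, e.g. in whether the end bound is inclusive.
 * StaticForSketch and DimConstSketch are hypothetical names, shown purely to
 * illustrate how a compile-time loop can hand each dimension to
 * IterateDimensions::impl as a constant-carrying type.
 */
template <int I>
struct DimConstSketch
{
    static constexpr int value = I;
    operator int() const { return I; }
};
template <int BEGIN, int END, typename OpT>
struct StaticForSketch
{
    template <typename A1, typename A2>
    static void impl(A1 a1, A2 const & a2)
    {
        OpT::impl(DimConstSketch<BEGIN>(), a1, a2);
        StaticForSketch<BEGIN + 1, END, OpT>::impl(a1, a2);
    }
};
template <int END, typename OpT>
struct StaticForSketch<END, END, OpT>
{
    template <typename A1, typename A2>
    static void impl(A1 a1, A2 const & a2)
    {
        OpT::impl(DimConstSketch<END>(), a1, a2);
    }
};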
/**
* @brief Test detection time & quality relative to point dimension
*/
template <
int DIM,
typename T>
// typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
struct TestDimensions
{
static void impl(
PointCloud<T> & pc,
int pointNum)
{
static_assert(DIM != 0, "DIM equal to zero!\n");
pc.pointCnt_ = pointNum;
pc.initializeData();
size_t neededMemSize = 5 * pointNum * DIM * sizeof(T);
neededMemSize = ::max(HUNDRED_MB_IN_BYTES, neededMemSize);
std::cout << "Reserve " << float(neededMemSize) / 1024.f / 1024.f
<< " Mb for malloc heap size" << std::endl;
configureDevice(neededMemSize);
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << DIM << "D\n";
testMemLayout<DIM>(pointNum, pc);
}
};
template <typename T>
struct TestDimensions<0, T>
{
static void impl(
PointCloud<T> & pc,
int pointNum)
{
pc.pointCnt_ = pointNum;
pc.dim_ = MAX_TEST_DIM;
pc.initializeData();
size_t neededMemSize = 10 * pointNum * MAX_TEST_DIM * sizeof(T);
neededMemSize = ::max(HUNDRED_MB_IN_BYTES, neededMemSize);
std::cout << "Reserve " << float(neededMemSize) / 1024.f / 1024.f
<< " Mb for malloc heap size" << std::endl;
configureDevice(neededMemSize);
StaticFor<1, MAX_TEST_DIM, IterateDimensions>::impl(pointNum, pc);
}
};
/**
* @brief Test detection time & quality relative to number of points
*/
template <
typename T,
int DIM = 0,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testSize(
PointCloud<T> & pc,
int pointNum = -1)
{
if (pointNum > 0)
{
TestDimensions<DIM, T>::impl(pc, pointNum);
}
else
{
for (int k = 1000; k <= MAX_POINTS_NUM; k *= 10)
{
std::cout << "\n//------------------------------------------"
<< "\n//\t\t pointNum: " << k
<< "\n//------------------------------------------\n";
TestDimensions<DIM, T>::impl(pc, k);
}
}
}
int main(int argc, char const **argv)
{
float a = -1.f, b = -1.f, stddev = -1.f;
int pointNum = -1;
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--a <a parameter of spiral or length if dim > 3>]\n"
"\t\t[--b <b parameter of spiral or ignored if dim > 3>]\n"
"\t\t[--stddev <standard deviation of generated samples>]\n"
"\t\t[--size <number of points>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", b);
}
if (args.CheckCmdLineFlag("stddev"))
{
args.GetCmdLineArgument("stddev", stddev);
}
if (args.CheckCmdLineFlag("size"))
{
args.GetCmdLineArgument("size", pointNum);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", g_devId);
}
if (args.CheckCmdLineFlag("v"))
{
g_verbose = true;
}
checkCudaErrors(deviceInit(g_devId));
// set device name for logging and drawing purposes
hipDeviceProp_t devProp;
checkCudaErrors(hipGetDeviceProperties(&devProp, g_devId));
g_devName = devProp.name;
#ifdef QUICK_TEST
if (pointNum < 0 ||
a < 0 ||
b < 0 ||
stddev < 0)
{
std::cout << "Have to specify parameters! Rerun with --help for help.\n";
exit(1);
}
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) float: "
<< "\n//------------------------------------------\n";
const int dim = 3;
PointCloud<float> && fpc = SpiralPointCloud<float>(a, b, pointNum, dim, stddev);
TestDimensions<dim, float>::impl(fpc, pointNum);
// std::cout << "\n//------------------------------------------"
// << "\n//\t\t (spiral) double: "
// << "\n//------------------------------------------\n";
// PointCloud<double> && dpc = SpiralPointCloud<double>(a, b, pointNum, dim, stddev);
// TestDimensions<dim, double>::impl(dpc, pointNum);
#else
// 1e6 2D points, spiral a=22, b=10, stddev=4
#ifndef RD_DOUBLE_PRECISION
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc2d = SpiralPointCloud<float>(22.f, 10.f, 0, 2, 4.f);
PointCloud<float> && fpc3d = SpiralPointCloud<float>(22.f, 10.f, 0, 3, 4.f);
TestDimensions<2, float>::impl(fpc2d, int(1e6));
TestDimensions<3, float>::impl(fpc3d, int(1e6));
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc2 = SegmentPointCloud<float>(1000.f, 0, 0, 4.f);
testSize<float>(fpc2);
#else
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) double: "
<< "\n//------------------------------------------\n";
PointCloud<double> && dpc2d = SpiralPointCloud<double>(22.0, 10.0, 0, 2, 4.0);
PointCloud<double> && dpc3d = SpiralPointCloud<double>(22.0, 10.0, 0, 3, 4.0);
TestDimensions<2, double>::impl(dpc2d, int(1e6));
TestDimensions<3, double>::impl(dpc3d, int(1e6));
PointCloud<double> && dpc2 = SegmentPointCloud<double>(1000.0, 0, 0, 4.0);
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) double: "
<< "\n//------------------------------------------\n";
testSize<double>(dpc2);
#endif
#endif
checkCudaErrors(hipDeviceReset());
std::cout << rd::HLINE << std::endl;
std::cout << "END!" << std::endl;
return EXIT_SUCCESS;
}
| 9c6d29b74f67b9488059736baa6a9c337a0b8f65.cu | /**
* @file test_tiled_tree.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <cmath>
#include <vector>
// #ifndef RD_DEBUG
// #define NDEBUG // for disabling assert macro
// #endif
#include <assert.h>
#if defined(RD_DEBUG) && !defined(CUB_STDERR)
#define CUB_STDERR
#endif
#include "rd/gpu/device/tiled/tiled_tree.cuh"
#include "rd/gpu/device/tiled/tree_drawer.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "tests/test_util.hpp"
#include "cub/test_util.h"
//----------------------------------------------
// global variables / constants
//----------------------------------------------
static constexpr int BLOCK_THREADS = 128;
static constexpr int POINTS_PER_THREAD = 4;
static constexpr int MAX_TEST_DIM = 3;
static constexpr int MAX_POINTS_NUM = int(1e7);
static constexpr int RD_CUDA_MAX_SYNC_DEPTH = 10;
static constexpr size_t HUNDRED_MB_IN_BYTES = 100 * 1024 * 1024;
static int g_devId = 0;
static bool g_verbose = false;
static std::string g_devName = "";
//------------------------------------------------------------------------
static void configureDevice(
size_t neededMemSize)
{
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDevice(g_devId));
checkCudaErrors(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, RD_CUDA_MAX_SYNC_DEPTH));
checkCudaErrors(cudaDeviceSetLimit(cudaLimitMallocHeapSize, neededMemSize));
}
//------------------------------------------------------------------------
struct TileProcessOp
{
template <typename NodeT>
__device__ __forceinline__ void operator()(NodeT const * node) const
{
if (threadIdx.x == 0)
{
_CubLog("---- Tile TileProcessOp() node id: %d, pointsCnt: %d, neighboursCnt: %d\n",
node->id, node->pointsCnt, node->neighboursCnt);
}
}
};
//------------------------------------------------------------------------
// Test kernels
//------------------------------------------------------------------------
template <
typename TiledTreeT,
typename TileProcessOpT,
int DIM,
typename T>
__launch_bounds__ (1)
static __global__ void buildTreeKernel(
TiledTreeT * tree,
T const * inputPoints,
int pointsNum,
rd::gpu::BoundingBox<DIM, T> * d_globalBBox,
int maxTileCapacity,
T sphereRadius,
T extensionFactor,
cub::ArrayWrapper<int, DIM> const initTileCntPerDim,
int stride)
{
// last arg: true -> debugSynchronous
// new(tree) TiledTreeT(maxTileCapacity, sphereRadius, extensionFactor, true);
new(tree) TiledTreeT(maxTileCapacity, sphereRadius, extensionFactor, false);
TileProcessOpT tileProcessOp;
cudaStream_t buildTreeStream;
rdDevCheckCall(cudaStreamCreateWithFlags(&buildTreeStream, cudaStreamNonBlocking));
rdDevCheckCall(tree->buildTree(
inputPoints, pointsNum, initTileCntPerDim, d_globalBBox, tileProcessOp, buildTreeStream,
stride));
rdDevCheckCall(cudaStreamDestroy(buildTreeStream));
}
template <
typename TiledTreeT>
__launch_bounds__ (1)
static __global__ void deleteTiledTree(
TiledTreeT * tree)
{
tree->~TiledTreeT();
}
//------------------------------------------------------------------------
template <
int DIM,
rd::DataMemoryLayout IN_MEM_LAYOUT,
rd::DataMemoryLayout OUT_MEM_LAYOUT,
typename T>
void testBuildTree(
T const * h_inputPoints,
int pointsNum,
T sphereRadius)
{
typedef rd::gpu::tiled::TiledTreePolicy<
BLOCK_THREADS,
POINTS_PER_THREAD,
cub::LOAD_LDG,
rd::gpu::IO_BACKEND_CUB>
TiledTreePolicyT;
typedef rd::gpu::tiled::TiledTree<
TiledTreePolicyT,
DIM,
IN_MEM_LAYOUT,
OUT_MEM_LAYOUT,
T>
TiledTreeT;
int maxTileCapacity = 0.18 * pointsNum;
T extensionFactor = 1.4;
T * d_inputPoints;
TiledTreeT * d_tree;
cub::ArrayWrapper<int, DIM> initTileCntPerDim;
int inPtsStride = DIM;
if (IN_MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(cudaMalloc(&d_inputPoints, pointsNum * DIM * sizeof(T)));
}
else if (IN_MEM_LAYOUT == rd::COL_MAJOR)
{
size_t pitch = 0;
checkCudaErrors(cudaMallocPitch(&d_inputPoints, &pitch, pointsNum * sizeof(T),
DIM));
inPtsStride = pitch / sizeof(T);
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(cudaMalloc(&d_tree, sizeof(TiledTreeT)));
// Hardcoded for 3 tiles per dimension
for (int k =0; k < DIM; ++k)
{
initTileCntPerDim.array[k] = 3;
}
if (IN_MEM_LAYOUT == rd::ROW_MAJOR)
{
rd::gpu::rdMemcpy<rd::ROW_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_inputPoints, h_inputPoints, DIM, pointsNum, DIM, inPtsStride);
}
else if (IN_MEM_LAYOUT == rd::COL_MAJOR)
{
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_inputPoints, h_inputPoints, DIM, pointsNum, inPtsStride * sizeof(T),
DIM * sizeof(T));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(cudaDeviceSynchronize());
rd::gpu::BoundingBox<DIM, T> globalBBox;
checkCudaErrors(globalBBox.template findBounds<IN_MEM_LAYOUT>(
d_inputPoints, pointsNum, inPtsStride));
globalBBox.calcDistances();
// allocate & copy memory for device global bounding box
rd::gpu::BoundingBox<DIM, T> *d_globalBBox;
checkCudaErrors(cudaMalloc(&d_globalBBox, sizeof(rd::gpu::BoundingBox<DIM,T>)));
checkCudaErrors(cudaMemcpy(d_globalBBox, &globalBBox, sizeof(rd::gpu::BoundingBox<DIM,T>),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "Invoking buildTreeKernel" << std::endl;
buildTreeKernel<TiledTreeT, TileProcessOp, DIM/*, IN_MEM_LAYOUT*/><<<1,1>>>(
d_tree, d_inputPoints, pointsNum, d_globalBBox, maxTileCapacity, sphereRadius,
extensionFactor, initTileCntPerDim, inPtsStride);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// draw graphs
rd::gpu::tiled::util::TreeDrawer<DIM, IN_MEM_LAYOUT, OUT_MEM_LAYOUT, TiledTreeT, T> treeDrawer(
d_tree, d_inputPoints, pointsNum, inPtsStride);
treeDrawer.drawBounds();
treeDrawer.drawEachTile();
std::cout << "Invoking deleteTiledTree kernel" << std::endl;
deleteTiledTree<TiledTreeT><<<1, 1>>>(d_tree);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//-------------------------------------------------------------------------------
// clean-up
//-------------------------------------------------------------------------------
checkCudaErrors(cudaFree(d_tree));
checkCudaErrors(cudaFree(d_inputPoints));
checkCudaErrors(cudaFree(d_globalBBox));
}
template <
int DIM,
typename T>
void testMemLayout(
int pointNum,
PointCloud<T> const & pc)
{
std::vector<T> && points = pc.extractPart(pointNum, DIM);
T sphereRadius = 1.45f * pc.stddev_;
if (g_verbose && DIM <= 3)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << DIM << "D_" << g_devName;
os << "_initial_samples_set";
gDrawer.showPoints(os.str(), points.data(), pointNum, DIM);
}
std::cout << rd::HLINE << std::endl;
std::cout << "<<<<< ROW_MAJOR >>>>>" << std::endl;
testBuildTree<DIM, rd::ROW_MAJOR, rd::ROW_MAJOR>(points.data(), pointNum, sphereRadius);
std::cout << rd::HLINE << std::endl;
std::cout << "<<<<< COL_MAJOR >>>>>" << std::endl;
testBuildTree<DIM, rd::COL_MAJOR, rd::COL_MAJOR>(points.data(), pointNum, sphereRadius);
}
/**
* @brief helper structure for static for loop over dimension
*/
struct IterateDimensions
{
template <typename D, typename T>
static void impl(
D idx,
int pointNum,
PointCloud<T> const & pc)
{
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << idx << "D\n";
testMemLayout<D::value>(pointNum, pc);
}
};
/**
* @brief Test detection time & quality relative to point dimension
*/
template <
int DIM,
typename T>
// typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
struct TestDimensions
{
static void impl(
PointCloud<T> & pc,
int pointNum)
{
static_assert(DIM != 0, "DIM equal to zero!\n");
pc.pointCnt_ = pointNum;
pc.initializeData();
size_t neededMemSize = 5 * pointNum * DIM * sizeof(T);
neededMemSize = std::max(HUNDRED_MB_IN_BYTES, neededMemSize);
std::cout << "Reserve " << float(neededMemSize) / 1024.f / 1024.f
<< " Mb for malloc heap size" << std::endl;
configureDevice(neededMemSize);
std::cout << rd::HLINE << std::endl;
std::cout << ">>>> Dimension: " << DIM << "D\n";
testMemLayout<DIM>(pointNum, pc);
}
};
template <typename T>
struct TestDimensions<0, T>
{
static void impl(
PointCloud<T> & pc,
int pointNum)
{
pc.pointCnt_ = pointNum;
pc.dim_ = MAX_TEST_DIM;
pc.initializeData();
size_t neededMemSize = 10 * pointNum * MAX_TEST_DIM * sizeof(T);
neededMemSize = std::max(HUNDRED_MB_IN_BYTES, neededMemSize);
std::cout << "Reserve " << float(neededMemSize) / 1024.f / 1024.f
<< " Mb for malloc heap size" << std::endl;
configureDevice(neededMemSize);
StaticFor<1, MAX_TEST_DIM, IterateDimensions>::impl(pointNum, pc);
}
};
/**
* @brief Test detection time & quality relative to number of points
*/
template <
typename T,
int DIM = 0,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
static void testSize(
PointCloud<T> & pc,
int pointNum = -1)
{
if (pointNum > 0)
{
TestDimensions<DIM, T>::impl(pc, pointNum);
}
else
{
for (int k = 1000; k <= MAX_POINTS_NUM; k *= 10)
{
std::cout << "\n//------------------------------------------"
<< "\n//\t\t pointNum: " << k
<< "\n//------------------------------------------\n";
TestDimensions<DIM, T>::impl(pc, k);
}
}
}
int main(int argc, char const **argv)
{
float a = -1.f, b = -1.f, stddev = -1.f;
int pointNum = -1;
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--a <a parameter of spiral or length if dim > 3>]\n"
"\t\t[--b <b parameter of spiral or ignored if dim > 3>]\n"
"\t\t[--stddev <standard deviation of generated samples>]\n"
"\t\t[--size <number of points>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", b);
}
if (args.CheckCmdLineFlag("stddev"))
{
args.GetCmdLineArgument("stddev", stddev);
}
if (args.CheckCmdLineFlag("size"))
{
args.GetCmdLineArgument("size", pointNum);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", g_devId);
}
if (args.CheckCmdLineFlag("v"))
{
g_verbose = true;
}
checkCudaErrors(deviceInit(g_devId));
// set device name for logging and drawing purposes
cudaDeviceProp devProp;
checkCudaErrors(cudaGetDeviceProperties(&devProp, g_devId));
g_devName = devProp.name;
#ifdef QUICK_TEST
if (pointNum < 0 ||
a < 0 ||
b < 0 ||
stddev < 0)
{
std::cout << "Have to specify parameters! Rerun with --help for help.\n";
exit(1);
}
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) float: "
<< "\n//------------------------------------------\n";
const int dim = 3;
PointCloud<float> && fpc = SpiralPointCloud<float>(a, b, pointNum, dim, stddev);
TestDimensions<dim, float>::impl(fpc, pointNum);
// std::cout << "\n//------------------------------------------"
// << "\n//\t\t (spiral) double: "
// << "\n//------------------------------------------\n";
// PointCloud<double> && dpc = SpiralPointCloud<double>(a, b, pointNum, dim, stddev);
// TestDimensions<dim, double>::impl(dpc, pointNum);
#else
// 1e6 2D points, spiral a=22, b=10, stddev=4
#ifndef RD_DOUBLE_PRECISION
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc2d = SpiralPointCloud<float>(22.f, 10.f, 0, 2, 4.f);
PointCloud<float> && fpc3d = SpiralPointCloud<float>(22.f, 10.f, 0, 3, 4.f);
TestDimensions<2, float>::impl(fpc2d, int(1e6));
TestDimensions<3, float>::impl(fpc3d, int(1e6));
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) float: "
<< "\n//------------------------------------------\n";
PointCloud<float> && fpc2 = SegmentPointCloud<float>(1000.f, 0, 0, 4.f);
testSize<float>(fpc2);
#else
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (spiral) double: "
<< "\n//------------------------------------------\n";
PointCloud<double> && dpc2d = SpiralPointCloud<double>(22.0, 10.0, 0, 2, 4.0);
PointCloud<double> && dpc3d = SpiralPointCloud<double>(22.0, 10.0, 0, 3, 4.0);
TestDimensions<2, double>::impl(dpc2d, int(1e6));
TestDimensions<3, double>::impl(dpc3d, int(1e6));
PointCloud<double> && dpc2 = SegmentPointCloud<double>(1000.0, 0, 0, 4.0);
std::cout << "\n//------------------------------------------"
<< "\n//\t\t (segment) double: "
<< "\n//------------------------------------------\n";
testSize<double>(dpc2);
#endif
#endif
checkCudaErrors(cudaDeviceReset());
std::cout << rd::HLINE << std::endl;
std::cout << "END!" << std::endl;
return EXIT_SUCCESS;
}
|
9e631d68214da24abdeb1edcb1f18f3e43132c16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 1;
numberOfBlocks = 1;
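  /*
   * For comparison (values are illustrative only): a configuration that keeps
   * the GPU busy would look like the commented lines below; any multiple of
   * the warp size works for threadsPerBlock.
   *
   * threadsPerBlock = 256;
   * numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
   */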
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 9e631d68214da24abdeb1edcb1f18f3e43132c16.cu | #include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 1;
numberOfBlocks = 1;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
b68f5f93e1d658b6a21d5239642462b8c49e180d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stats_kernal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
float *device_soln = NULL;
hipMalloc(&device_soln, XSIZE*YSIZE);
const int size = 1;
const int num_calcs = 1;
const int num_threads = 1;
const int offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((stats_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data,device_soln,size,num_calcs,num_threads,offset);
hipDeviceSynchronize();
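//Warm-up: 10 untimed launches so start-up overhead is excluded from the timed loop below.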
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((stats_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data,device_soln,size,num_calcs,num_threads,offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((stats_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data,device_soln,size,num_calcs,num_threads,offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b68f5f93e1d658b6a21d5239642462b8c49e180d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stats_kernal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
float *device_soln = NULL;
cudaMalloc(&device_soln, XSIZE*YSIZE);
const int size = 1;
const int num_calcs = 1;
const int num_threads = 1;
const int offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
stats_kernal<<<gridBlock,threadBlock>>>(data,device_soln,size,num_calcs,num_threads,offset);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
stats_kernal<<<gridBlock,threadBlock>>>(data,device_soln,size,num_calcs,num_threads,offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
stats_kernal<<<gridBlock,threadBlock>>>(data,device_soln,size,num_calcs,num_threads,offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
16c6ecb5834536b7527487c636a4fd6d6537efd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
} | 16c6ecb5834536b7527487c636a4fd6d6537efd8.cu | #include "includes.h"
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
} |
251413922d0251026b99ad3e4b95055dc9aa1d92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fractal.hh"
#include "utils.hh"
#include <cutil_inline.h>
#include <cuda_gl_interop.h>
#include <cutil_gl_inline.h>
#include <cutil_math.h>
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include <math_functions.h>
#include <iostream>
#include <iomanip>
using namespace std;
#include "kernel-utils.hh"
__global__ void drawJuliaKernel(unsigned int *out,
float centerX, float centerY, float jcx, float jcy, float scale, size_t width, size_t height,
size_t maxIters) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
// Position of this pixel
float x0 = centerX + (x - (width * 0.5f)) * scale;
float y0 = centerY + (y - (height * 0.5f)) * scale;
int iters = 0;
float cx = x0;
float cy = y0;
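// Iterate z -> z^2 + c, seeding z from the pixel position and keeping the
// Julia constant c = (jcx, jcy) fixed for every pixel of the image.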
// Squared values
float scx = cx * cx, scy = cy * cy;
while ((scx + scy) < 4 && iters < maxIters) {
float xt = scx - scy + jcx;
cy = 2 * cx * cy + jcy;
cx = xt;
// Calculate squared values
scx = cx * cx;
scy = cy * cy;
iters++;
}
unsigned int color = getColor(iters, maxIters);
out[y * width + x] = color;
}
JuliaRenderer::JuliaRenderer(size_t width, size_t height, float cx_, float cy_) :
FractalRenderer(width, height), cx(cx_), cy(cy_) {
}
JuliaRenderer::~JuliaRenderer() {
}
void JuliaRenderer::calculateFractal(unsigned int* target) {
size_t iters = 4.0f / pow(scale, 0.6);
iters = min((unsigned int) iters, 10000);
iters *= iterScale;
cout << "Rendering Julia(" << fixed << setprecision(5) << cx << ", " << cy << ") at "
<< x << "," << y << " with scale " << scale << ", max iters " << iters << endl;
hipLaunchKernelGGL(( drawJuliaKernel), dim3(*dimGrid), dim3(*dimBlock), 0, 0, target, x, y, cx, cy, scale, width, height, iters);
changed = false;
}
| 251413922d0251026b99ad3e4b95055dc9aa1d92.cu | #include "fractal.hh"
#include "utils.hh"
#include <cutil_inline.h>
#include <cuda_gl_interop.h>
#include <cutil_gl_inline.h>
#include <cutil_math.h>
#include <vector_types.h>
#include <vector_functions.h>
#include <math_functions.h>
#include <iostream>
#include <iomanip>
using namespace std;
#include "kernel-utils.hh"
__global__ void drawJuliaKernel(unsigned int *out,
float centerX, float centerY, float jcx, float jcy, float scale, size_t width, size_t height,
size_t maxIters) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
// Position of this pixel
float x0 = centerX + (x - (width * 0.5f)) * scale;
float y0 = centerY + (y - (height * 0.5f)) * scale;
int iters = 0;
float cx = x0;
float cy = y0;
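// Iterate z -> z^2 + c, seeding z from the pixel position and keeping the
// Julia constant c = (jcx, jcy) fixed for every pixel of the image.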
// Squared values
float scx = cx * cx, scy = cy * cy;
while ((scx + scy) < 4 && iters < maxIters) {
float xt = scx - scy + jcx;
cy = 2 * cx * cy + jcy;
cx = xt;
// Calculate squared values
scx = cx * cx;
scy = cy * cy;
iters++;
}
unsigned int color = getColor(iters, maxIters);
out[y * width + x] = color;
}
JuliaRenderer::JuliaRenderer(size_t width, size_t height, float cx_, float cy_) :
FractalRenderer(width, height), cx(cx_), cy(cy_) {
}
JuliaRenderer::~JuliaRenderer() {
}
void JuliaRenderer::calculateFractal(unsigned int* target) {
size_t iters = 4.0f / pow(scale, 0.6);
iters = min((unsigned int) iters, 10000);
iters *= iterScale;
cout << "Rendering Julia(" << fixed << setprecision(5) << cx << ", " << cy << ") at "
<< x << "," << y << " with scale " << scale << ", max iters " << iters << endl;
drawJuliaKernel<<<*dimGrid, *dimBlock>>>(target, x, y, cx, cy, scale, width, height, iters);
changed = false;
}
|
6b0bf790dc099bdc8c4f5594d5f65f66212a50a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SharedMem2Registers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *outFloat = NULL;
hipMalloc(&outFloat, XSIZE*YSIZE);
int iSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
SharedMem2Registers), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
SharedMem2Registers), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
SharedMem2Registers), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6b0bf790dc099bdc8c4f5594d5f65f66212a50a8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SharedMem2Registers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *outFloat = NULL;
cudaMalloc(&outFloat, XSIZE*YSIZE);
int iSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SharedMem2Registers<<<gridBlock,threadBlock>>>(outFloat,iSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SharedMem2Registers<<<gridBlock,threadBlock>>>(outFloat,iSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SharedMem2Registers<<<gridBlock,threadBlock>>>(outFloat,iSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8a862e28b646fac314f1d183ce45b4864d57f5c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/element_wise_function.hpp"
#include "HugeCTR/include/layers/multiply_layer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
template <typename T>
__global__ void multiply_kernel(const T* input, const T* weight, T* output, int batch_size,
int slot_num, int embedding_vec_size) {
if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) {
for (int i = 0; i < slot_num; i++) {
output[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] =
input[blockIdx.x * slot_num + i] * weight[i * embedding_vec_size + threadIdx.x];
}
}
}
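// Fuses the elementwise product top_grad * input (input is broadcast across the
// embedding vector) with a tiled transpose, so each weight element's per-sample
// contributions become contiguous for the per-row sum reduction that follows in
// sum_reduce_batch_kernel.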
template <typename T>
__global__ void multiply_transpose_fuse_kernel(int batch_size, int slot_num, int embedding_vec_size,
const T* top_grad, const T* input,
T* wgrad_tmp_trans) {
int row = batch_size;
int col = slot_num * embedding_vec_size;
__shared__ T sh_data[BLOCK_DIM_SIZE + 1][BLOCK_DIM_SIZE];
int src_index_x = blockIdx.x * blockDim.x + threadIdx.x;
int src_index_y = blockIdx.y * blockDim.y + threadIdx.y;
if ((src_index_x < col) && (src_index_y < row)) {
int index_in = src_index_y * col + src_index_x;
sh_data[threadIdx.x][threadIdx.y] = top_grad[index_in] * input[index_in / embedding_vec_size];
}
__syncthreads();
int dst_index_x = blockIdx.y * blockDim.y + threadIdx.x;
int dst_index_y = blockIdx.x * blockDim.x + threadIdx.y;
if ((dst_index_x < row) && (dst_index_y < col)) {
int index_out = dst_index_y * row + dst_index_x;
wgrad_tmp_trans[index_out] = sh_data[threadIdx.y][threadIdx.x];
}
}
// sum reduce computation in one block
template <typename T>
__global__ void sum_reduce_batch_kernel(int row, // row=gridDim.x
int col, const T* input, T* output) {
float local_sum = 0.0f;
for (int tid = threadIdx.x; tid < col; tid += blockDim.x) {
local_sum += input[blockIdx.x * col + tid];
}
__syncthreads();
local_sum = blockReduceSum(local_sum);
if (threadIdx.x == 0) {
output[blockIdx.x] += local_sum;
}
}
template <typename T>
__global__ void multiply_dgrad_kernel(const T* top_grad, const T* weight, T* dgrad, int batch_size,
int slot_num, int embedding_vec_size) {
if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) {
for (int i = 0; i < slot_num; i++) {
T local_sum = top_grad[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size +
threadIdx.x] *
weight[i * embedding_vec_size + threadIdx.x];
local_sum = blockReduceSum(local_sum);
if (threadIdx.x == 0) {
dgrad[blockIdx.x * slot_num + i] = local_sum;
}
}
}
}
template <typename T>
void multiply_wgrad(const T* top_grad, const T* input, T* wgrad, T* wgrad_tmp_trans, int batch_size,
int slot_num, int embedding_vec_size, hipStream_t stream) {
dim3 blockSize1(BLOCK_DIM_SIZE, BLOCK_DIM_SIZE, 1);
dim3 gridSize1((slot_num * embedding_vec_size + blockSize1.x - 1) / blockSize1.x,
(batch_size + blockSize1.y - 1) / blockSize1.y, 1);
hipLaunchKernelGGL(( multiply_transpose_fuse_kernel), dim3(gridSize1), dim3(blockSize1), 0, stream,
batch_size, slot_num, embedding_vec_size, top_grad, input, wgrad_tmp_trans);
dim3 blockSize2(256, 1, 1);
dim3 gridSize2(slot_num * embedding_vec_size, 1, 1);
hipLaunchKernelGGL(( sum_reduce_batch_kernel), dim3(gridSize2), dim3(blockSize2), 0, stream, slot_num * embedding_vec_size,
batch_size, wgrad_tmp_trans, wgrad);
}
template <typename T>
void multiply_dgrad(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num,
int embedding_vec_size, hipStream_t stream) {
dim3 blockSize(embedding_vec_size, 1, 1);
dim3 gridSize(batch_size, 1, 1);
hipLaunchKernelGGL(( multiply_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, stream, top_grad, weight, dgrad, batch_size,
slot_num, embedding_vec_size);
}
} // end of namespace
MultiplyLayer::MultiplyLayer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff,
const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff,
const std::shared_ptr<GeneralBuffer<float>>& blob_buff,
const std::shared_ptr<Tensor<float>>& in_tensor,
std::shared_ptr<Tensor<float>>& out_tensor,
const std::vector<size_t>& weight_dims, int device_id)
: Layer(device_id) {
try {
CudaDeviceContext context(get_device_id());
auto in_dims = in_tensor->get_dims();
if (in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be multiplied");
}
if (in_tensor->get_format() != TensorFormat_t::HW) {
CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed for multiply layer");
}
if (weight_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D weights is allowed for multiply layer");
}
if (weight_dims[0] != in_dims[1]) {
CK_THROW_(Error_t::WrongInput, "weight_dims[0] must be equal to in_dims[1]");
}
batch_size_ = in_dims[0];
slot_num_ = weight_dims[0];
embedding_vec_size_ = weight_dims[1];
std::vector<size_t> out_dims{batch_size_, slot_num_ * embedding_vec_size_};
out_tensor.reset(new Tensor<float>(out_dims, blob_buff, in_tensor->get_format()));
in_tensors_.emplace_back(in_tensor);
out_tensors_.emplace_back(out_tensor);
TensorFormat_t w_format = TensorFormat_t::HW;
weights_.emplace_back(new Tensor<float>(weight_dims, weight_buff, w_format));
wgrad_.emplace_back(new Tensor<float>(weight_dims, wgrad_buff, w_format));
internal_buff_.reset(new GeneralBuffer<float>());
wgrad_tmp_trans_.reset(new Tensor<float>(out_dims, internal_buff_, TensorFormat_t::HW));
internal_buff_->init(get_device_id());
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
std::vector<float> MultiplyLayer::get_initializer() {
std::vector<float> initializer;
size_t w_size = (weights_[0])->get_num_elements();
initializer.resize(w_size);
float in_dim = slot_num_; // in_tensor dim[1]
float out_dim = slot_num_ * embedding_vec_size_; // out_tensor dim[1]
float limit = sqrt(6.f / (in_dim + out_dim));
HugeCTR::UnifiedDataSimulator<float> fdata_sim(-1 * limit, limit);
for (size_t i = 0; i < w_size; i++) initializer[i] = fdata_sim.get_num();
return initializer;
}
void MultiplyLayer::fprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
float* input = in_tensors_[0]->get_ptr();
float* weight = weights_[0]->get_ptr();
float* output = out_tensors_[0]->get_ptr();
dim3 blockSize(embedding_vec_size_, 1, 1);
dim3 gridSize(batch_size_, 1, 1);
hipLaunchKernelGGL(( multiply_kernel), dim3(gridSize), dim3(blockSize), 0, stream, input, weight, output, batch_size_, slot_num_,
embedding_vec_size_);
}
void MultiplyLayer::bprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
float* weight = weights_[0]->get_ptr();
float* wgrad = wgrad_[0]->get_ptr();
float* wgrad_tmp_trans = wgrad_tmp_trans_->get_ptr();
float* input = in_tensors_[0]->get_ptr();
float* output = out_tensors_[0]->get_ptr();
multiply_wgrad(output, input, wgrad, wgrad_tmp_trans, batch_size_, slot_num_, embedding_vec_size_,
stream);
// CAUTION: dgrad computation will modify the "input", so it must be put after wgrad computation
multiply_dgrad(output, weight, input, batch_size_, slot_num_, embedding_vec_size_, stream);
}
} // namespace HugeCTR
| 8a862e28b646fac314f1d183ce45b4864d57f5c1.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/element_wise_function.hpp"
#include "HugeCTR/include/layers/multiply_layer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
#include <algorithm>
#include <functional>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
#define BLOCK_DIM_SIZE 32
template <typename T>
__global__ void multiply_kernel(const T* input, const T* weight, T* output, int batch_size,
int slot_num, int embedding_vec_size) {
if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) {
for (int i = 0; i < slot_num; i++) {
output[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] =
input[blockIdx.x * slot_num + i] * weight[i * embedding_vec_size + threadIdx.x];
}
}
}
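// Fuses the elementwise product top_grad * input (input is broadcast across the
// embedding vector) with a tiled transpose, so each weight element's per-sample
// contributions become contiguous for the per-row sum reduction that follows in
// sum_reduce_batch_kernel.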
template <typename T>
__global__ void multiply_transpose_fuse_kernel(int batch_size, int slot_num, int embedding_vec_size,
const T* top_grad, const T* input,
T* wgrad_tmp_trans) {
int row = batch_size;
int col = slot_num * embedding_vec_size;
__shared__ T sh_data[BLOCK_DIM_SIZE + 1][BLOCK_DIM_SIZE];
int src_index_x = blockIdx.x * blockDim.x + threadIdx.x;
int src_index_y = blockIdx.y * blockDim.y + threadIdx.y;
if ((src_index_x < col) && (src_index_y < row)) {
int index_in = src_index_y * col + src_index_x;
sh_data[threadIdx.x][threadIdx.y] = top_grad[index_in] * input[index_in / embedding_vec_size];
}
__syncthreads();
int dst_index_x = blockIdx.y * blockDim.y + threadIdx.x;
int dst_index_y = blockIdx.x * blockDim.x + threadIdx.y;
if ((dst_index_x < row) && (dst_index_y < col)) {
int index_out = dst_index_y * row + dst_index_x;
wgrad_tmp_trans[index_out] = sh_data[threadIdx.y][threadIdx.x];
}
}
// sum reduce computation in one block
template <typename T>
__global__ void sum_reduce_batch_kernel(int row, // row=gridDim.x
int col, const T* input, T* output) {
float local_sum = 0.0f;
for (int tid = threadIdx.x; tid < col; tid += blockDim.x) {
local_sum += input[blockIdx.x * col + tid];
}
__syncthreads();
local_sum = blockReduceSum(local_sum);
if (threadIdx.x == 0) {
output[blockIdx.x] += local_sum;
}
}
template <typename T>
__global__ void multiply_dgrad_kernel(const T* top_grad, const T* weight, T* dgrad, int batch_size,
int slot_num, int embedding_vec_size) {
if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) {
for (int i = 0; i < slot_num; i++) {
T local_sum = top_grad[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size +
threadIdx.x] *
weight[i * embedding_vec_size + threadIdx.x];
local_sum = blockReduceSum(local_sum);
if (threadIdx.x == 0) {
dgrad[blockIdx.x * slot_num + i] = local_sum;
}
}
}
}
template <typename T>
void multiply_wgrad(const T* top_grad, const T* input, T* wgrad, T* wgrad_tmp_trans, int batch_size,
int slot_num, int embedding_vec_size, cudaStream_t stream) {
dim3 blockSize1(BLOCK_DIM_SIZE, BLOCK_DIM_SIZE, 1);
dim3 gridSize1((slot_num * embedding_vec_size + blockSize1.x - 1) / blockSize1.x,
(batch_size + blockSize1.y - 1) / blockSize1.y, 1);
multiply_transpose_fuse_kernel<<<gridSize1, blockSize1, 0, stream>>>(
batch_size, slot_num, embedding_vec_size, top_grad, input, wgrad_tmp_trans);
dim3 blockSize2(256, 1, 1);
dim3 gridSize2(slot_num * embedding_vec_size, 1, 1);
sum_reduce_batch_kernel<<<gridSize2, blockSize2, 0, stream>>>(slot_num * embedding_vec_size,
batch_size, wgrad_tmp_trans, wgrad);
}
template <typename T>
void multiply_dgrad(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num,
int embedding_vec_size, cudaStream_t stream) {
dim3 blockSize(embedding_vec_size, 1, 1);
dim3 gridSize(batch_size, 1, 1);
multiply_dgrad_kernel<<<gridSize, blockSize, 0, stream>>>(top_grad, weight, dgrad, batch_size,
slot_num, embedding_vec_size);
}
} // end of namespace
MultiplyLayer::MultiplyLayer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff,
const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff,
const std::shared_ptr<GeneralBuffer<float>>& blob_buff,
const std::shared_ptr<Tensor<float>>& in_tensor,
std::shared_ptr<Tensor<float>>& out_tensor,
const std::vector<size_t>& weight_dims, int device_id)
: Layer(device_id) {
try {
CudaDeviceContext context(get_device_id());
auto in_dims = in_tensor->get_dims();
if (in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be multiplied");
}
if (in_tensor->get_format() != TensorFormat_t::HW) {
CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed for multiply layer");
}
if (weight_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Only 2D weights is allowed for multiply layer");
}
if (weight_dims[0] != in_dims[1]) {
CK_THROW_(Error_t::WrongInput, "weight_dims[0] must be equal to in_dims[1]");
}
batch_size_ = in_dims[0];
slot_num_ = weight_dims[0];
embedding_vec_size_ = weight_dims[1];
std::vector<size_t> out_dims{batch_size_, slot_num_ * embedding_vec_size_};
out_tensor.reset(new Tensor<float>(out_dims, blob_buff, in_tensor->get_format()));
in_tensors_.emplace_back(in_tensor);
out_tensors_.emplace_back(out_tensor);
TensorFormat_t w_format = TensorFormat_t::HW;
weights_.emplace_back(new Tensor<float>(weight_dims, weight_buff, w_format));
wgrad_.emplace_back(new Tensor<float>(weight_dims, wgrad_buff, w_format));
internal_buff_.reset(new GeneralBuffer<float>());
wgrad_tmp_trans_.reset(new Tensor<float>(out_dims, internal_buff_, TensorFormat_t::HW));
internal_buff_->init(get_device_id());
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
std::vector<float> MultiplyLayer::get_initializer() {
std::vector<float> initializer;
size_t w_size = (weights_[0])->get_num_elements();
initializer.resize(w_size);
float in_dim = slot_num_; // in_tensor dim[1]
float out_dim = slot_num_ * embedding_vec_size_; // out_tensor dim[1]
float limit = sqrt(6.f / (in_dim + out_dim));
HugeCTR::UnifiedDataSimulator<float> fdata_sim(-1 * limit, limit);
for (size_t i = 0; i < w_size; i++) initializer[i] = fdata_sim.get_num();
return initializer;
}
void MultiplyLayer::fprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
float* input = in_tensors_[0]->get_ptr();
float* weight = weights_[0]->get_ptr();
float* output = out_tensors_[0]->get_ptr();
dim3 blockSize(embedding_vec_size_, 1, 1);
dim3 gridSize(batch_size_, 1, 1);
multiply_kernel<<<gridSize, blockSize, 0, stream>>>(input, weight, output, batch_size_, slot_num_,
embedding_vec_size_);
}
void MultiplyLayer::bprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
float* weight = weights_[0]->get_ptr();
float* wgrad = wgrad_[0]->get_ptr();
float* wgrad_tmp_trans = wgrad_tmp_trans_->get_ptr();
float* input = in_tensors_[0]->get_ptr();
float* output = out_tensors_[0]->get_ptr();
multiply_wgrad(output, input, wgrad, wgrad_tmp_trans, batch_size_, slot_num_, embedding_vec_size_,
stream);
// CAUTION: dgrad computation will modify the "input", so it must be put after wgrad computation
multiply_dgrad(output, weight, input, batch_size_, slot_num_, embedding_vec_size_, stream);
}
} // namespace HugeCTR
|
b8c793b80537d1e42911c012865f347995275c0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
Dtype alpha) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] :
alpha * (exp(in[index]) - 1);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ELUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(),
count, bottom_data, top_data, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data,
Dtype* out_diff, Dtype alpha) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_data[index] > 0 ? in_diff[index] :
in_diff[index] * (out_data[index] + alpha);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ELUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(),
count, top_diff, top_data, bottom_data, bottom_diff, alpha);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
| b8c793b80537d1e42911c012865f347995275c0c.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
Dtype alpha) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] :
alpha * (exp(in[index]) - 1);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
ELUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(
count, bottom_data, top_data, alpha);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, const Dtype* in_data,
Dtype* out_diff, Dtype alpha) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_data[index] > 0 ? in_diff[index] :
in_diff[index] * (out_data[index] + alpha);
}
}
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype alpha = this->layer_param_.elu_param().alpha();
// NOLINT_NEXT_LINE(whitespace/operators)
ELUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(
count, top_diff, top_data, bottom_data, bottom_diff, alpha);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
|
9a535284affe440a3e6d523281d67c8a3673fbca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_hip.cuh"
__global__ void process(const int* __restrict__ nitems_per_cell,
const int* __restrict__ cell_data,
const int* __restrict__ start_index,
int* __restrict__ output,
int num_cells) {
int id = threadIdx.x + 32 * blockIdx.x;
int n = id >= num_cells ? 0 : nitems_per_cell[id];
bool block = n >= 16;
int mask = __ballot(block);
if (mask != 0) {
do {
int bit = __ffs(mask) - 1;
mask &= ~(1 << bit);
int block_id = bit + 32 * blockIdx.x;
int block_n = nitems_per_cell[block_id];
int block_start = start_index[block_id];
int block_data = cell_data[block_id];
for (int i = threadIdx.x; i < block_n; i += 32) output[block_start + i] = block_data + i;
} while (mask != 0);
}
if (id < num_cells && !block) {
int start = start_index[id];
int data = cell_data[id];
for (int i = 0; i < n; i++) output[start + i] = data + i;
}
}
void process_blocked(const int* nitems_per_cell, const int* cell_data, const int* start_index, int* output, int num_cells, int num_items) {
hipLaunchKernelGGL(( process), dim3(round_to(num_cells, 32)), dim3(32), 0, 0, nitems_per_cell, cell_data, start_index, output, num_cells);
}
| 9a535284affe440a3e6d523281d67c8a3673fbca.cu | #include "common.cuh"
__global__ void process(const int* __restrict__ nitems_per_cell,
const int* __restrict__ cell_data,
const int* __restrict__ start_index,
int* __restrict__ output,
int num_cells) {
int id = threadIdx.x + 32 * blockIdx.x;
int n = id >= num_cells ? 0 : nitems_per_cell[id];
bool block = n >= 16;
int mask = __ballot(block);
if (mask != 0) {
do {
int bit = __ffs(mask) - 1;
mask &= ~(1 << bit);
int block_id = bit + 32 * blockIdx.x;
int block_n = nitems_per_cell[block_id];
int block_start = start_index[block_id];
int block_data = cell_data[block_id];
for (int i = threadIdx.x; i < block_n; i += 32) output[block_start + i] = block_data + i;
} while (mask != 0);
}
if (id < num_cells && !block) {
int start = start_index[id];
int data = cell_data[id];
for (int i = 0; i < n; i++) output[start + i] = data + i;
}
}
void process_blocked(const int* nitems_per_cell, const int* cell_data, const int* start_index, int* output, int num_cells, int num_items) {
process<<<round_to(num_cells, 32), 32>>>(nitems_per_cell, cell_data, start_index, output, num_cells);
}
|
9c6e99bf7f1010a1eb7263824ec60beb0d95f089.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// leaky_relu.cu
#include <algorithm>
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/leaky_relu.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_leaky_relu_forward(const int num, T *y, const T *x,
float alpha) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
T x_idx = x[idx];
if (x_idx > 0) {
y[idx] = x_idx;
} else {
y[idx] = alpha * x_idx;
}
}
}
template <typename T, bool accum = true>
__global__ void kernel_leaky_relu_backward(const int num, T *dx, const T *x,
const T *dy, float alpha) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
if (accum) {
if (x[idx] > 0)
dx[idx] += dy[idx];
else
dx[idx] += alpha * dy[idx];
} else {
if (x[idx] > 0)
dx[idx] = dy[idx];
else
dx[idx] = alpha * dy[idx];
}
}
}
template <class T>
void LeakyReLUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y =
outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, !this->inplace_);
size_t size = inputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_leaky_relu_forward, size, y, x,
this->alpha_);
}
template <class T>
void LeakyReLUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(
this->ctx_, !(this->inplace_ || accum[0]));
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
size_t size = inputs[0]->size();
if (dx != dy && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_leaky_relu_backward<Tc, true>), size,
dx, x, dy, this->alpha_);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_leaky_relu_backward<Tc, false>),
size, dx, x, dy, this->alpha_);
}
}
}
| 9c6e99bf7f1010a1eb7263824ec60beb0d95f089.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// leaky_relu.cu
#include <algorithm>
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/leaky_relu.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_leaky_relu_forward(const int num, T *y, const T *x,
float alpha) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
T x_idx = x[idx];
if (x_idx > 0) {
y[idx] = x_idx;
} else {
y[idx] = alpha * x_idx;
}
}
}
template <typename T, bool accum = true>
__global__ void kernel_leaky_relu_backward(const int num, T *dx, const T *x,
const T *dy, float alpha) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
if (accum) {
if (x[idx] > 0)
dx[idx] += dy[idx];
else
dx[idx] += alpha * dy[idx];
} else {
if (x[idx] > 0)
dx[idx] = dy[idx];
else
dx[idx] = alpha * dy[idx];
}
}
}
template <class T>
void LeakyReLUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y =
outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, !this->inplace_);
size_t size = inputs[0]->size();
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_leaky_relu_forward, size, y, x,
this->alpha_);
}
template <class T>
void LeakyReLUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(
this->ctx_, !(this->inplace_ || accum[0]));
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
size_t size = inputs[0]->size();
if (dx != dy && accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_leaky_relu_backward<Tc, true>), size,
dx, x, dy, this->alpha_);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_leaky_relu_backward<Tc, false>),
size, dx, x, dy, this->alpha_);
}
}
}
|
81737c7942f986f6b40820c66af578ffc100463b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "verify.hip"
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses two shared-memory elements per thread (64 elements per warp)
//----------------------------------------------------------------------------
#define WARP_SIZE 32
__device__
unsigned int scanwarp(unsigned int val, volatile unsigned int* sData, const int maxlevel)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
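// Example: thread 37 (warp 1, lane 5) gets idx = 2*37 - 5 = 69 = 64*1 + 5,
// zeroes that slot as padding, then stores val at idx + 32; the conditional
// adds below build an inclusive scan over the warp's 32 values.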
int localId = threadIdx.x;
int idx = 2 * localId - (localId & (WARP_SIZE - 1));
sData[idx] = 0;
idx += WARP_SIZE;
sData[idx] = val;
if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; }
if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; }
if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; }
if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; }
if (4 <= maxlevel) { sData[idx] += sData[idx -16]; }
return sData[idx] - val; // convert inclusive -> exclusive
}
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__
uint4 scan4(const uint4 idata, unsigned int* ptr)
{
unsigned int idx = threadIdx.x;
uint4 val4 = idata;
unsigned int sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
unsigned int val = val4.w + sum[2];
val = scanwarp(val, ptr, 4);
__syncthreads();
if ((idx & (WARP_SIZE - 1)) == WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
if (idx < WARP_SIZE)
ptr[idx] = scanwarp(ptr[idx], ptr, 2);
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
__device__
uint4 rank4(const uint4 preds, unsigned int* sMem, unsigned int* numtrue)
{
int localId = threadIdx.x;
int localSize = blockDim.x;
uint4 address = scan4(preds, sMem);
if (localId == localSize - 1)
{
numtrue[0] = address.w + preds.w;
}
__syncthreads();
uint4 rank;
int idx = localId*4;
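// address is the exclusive scan of preds across the 4*blockDim.x elements:
// flagged keys are packed into [0, numtrue) and unflagged keys keep their
// relative order in [numtrue, 4*blockDim.x), giving a stable split.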
rank.x = (preds.x) ? address.x : numtrue[0] + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue[0] + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue[0] + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue[0] + idx + 3 - address.w;
return rank;
}
__global__ void radixSortBlocksKeysK(
unsigned int* keysIn,
unsigned int* keysOut,
const unsigned int nbits,
const unsigned int startbit)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned int numtrue[1];
__shared__ unsigned int sMem[4*128];
uint4 key = reinterpret_cast<uint4*>(keysIn)[globalId];
__syncthreads();
// radixSortBlockKeysOnly(&key, nbits, startbit, sMem, numtrue);
int localId = threadIdx.x;
int localSize = blockDim.x;
for(unsigned int shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r;
r = rank4(lsb, sMem, numtrue);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem[(r.x & 3) * localSize + (r.x >> 2)] = key.x;
sMem[(r.y & 3) * localSize + (r.y >> 2)] = key.y;
sMem[(r.z & 3) * localSize + (r.z >> 2)] = key.z;
sMem[(r.w & 3) * localSize + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem[localId];
key.y = sMem[localId + localSize];
key.z = sMem[localId + 2 * localSize];
key.w = sMem[localId + 3 * localSize];
__syncthreads();
}
//keysOut[globalId] = key;
reinterpret_cast<uint4*>(keysOut)[globalId] = key;
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of keys> <repeat>\n", argv[0]);
return 1;
}
const int N = atoi(argv[1]); // assume a multiple of 512
const int repeat = atoi(argv[2]);
srand(512);
unsigned int *keys = (unsigned int*) malloc (N * sizeof(unsigned int));
unsigned int *out = (unsigned int*) malloc (N * sizeof(unsigned int));
for (int i = 0; i < N; i++) keys[i] = rand() % 16;
const unsigned int startbit = 0;
const unsigned int nbits = 4;
const unsigned threads = 128; // 1
const unsigned teams = N/4/threads; // 1
unsigned int* d_keys;
hipMalloc((void**)&d_keys, N*sizeof(unsigned int));
hipMemcpy(d_keys, keys, N*sizeof(unsigned int), hipMemcpyHostToDevice);
unsigned int* d_tempKeys;
hipMalloc((void**)&d_tempKeys, N*sizeof(unsigned int));
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( radixSortBlocksKeysK), dim3(teams), dim3(threads), 0, 0, d_keys, d_tempKeys, nbits, startbit);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);
hipMemcpy(out, d_tempKeys, N*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipFree(d_keys);
hipFree(d_tempKeys);
bool check = verify(out, keys, threads, N);
if (check)
printf("PASS\n");
else
printf("FAIL\n");
free(keys);
free(out);
return 0;
}
| 81737c7942f986f6b40820c66af578ffc100463b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#include "verify.cu"
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses two shared-memory elements per thread (64 elements per warp)
//----------------------------------------------------------------------------
#define WARP_SIZE 32
__device__
unsigned int scanwarp(unsigned int val, volatile unsigned int* sData, const int maxlevel)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int localId = threadIdx.x;
int idx = 2 * localId - (localId & (WARP_SIZE - 1));
sData[idx] = 0;
idx += WARP_SIZE;
sData[idx] = val;
if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; }
if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; }
if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; }
if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; }
if (4 <= maxlevel) { sData[idx] += sData[idx -16]; }
return sData[idx] - val; // convert inclusive -> exclusive
}
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__
uint4 scan4(const uint4 idata, unsigned int* ptr)
{
unsigned int idx = threadIdx.x;
uint4 val4 = idata;
unsigned int sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
unsigned int val = val4.w + sum[2];
val = scanwarp(val, ptr, 4);
__syncthreads();
if ((idx & (WARP_SIZE - 1)) == WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
if (idx < WARP_SIZE)
ptr[idx] = scanwarp(ptr[idx], ptr, 2);
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
__device__
uint4 rank4(const uint4 preds, unsigned int* sMem, unsigned int* numtrue)
{
int localId = threadIdx.x;
int localSize = blockDim.x;
uint4 address = scan4(preds, sMem);
if (localId == localSize - 1)
{
numtrue[0] = address.w + preds.w;
}
__syncthreads();
uint4 rank;
int idx = localId*4;
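// address is the exclusive scan of preds across the 4*blockDim.x elements:
// flagged keys are packed into [0, numtrue) and unflagged keys keep their
// relative order in [numtrue, 4*blockDim.x), giving a stable split.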
rank.x = (preds.x) ? address.x : numtrue[0] + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue[0] + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue[0] + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue[0] + idx + 3 - address.w;
return rank;
}
__global__ void radixSortBlocksKeysK(
unsigned int* keysIn,
unsigned int* keysOut,
const unsigned int nbits,
const unsigned int startbit)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned int numtrue[1];
__shared__ unsigned int sMem[4*128];
uint4 key = reinterpret_cast<uint4*>(keysIn)[globalId];
__syncthreads();
// radixSortBlockKeysOnly(&key, nbits, startbit, sMem, numtrue);
int localId = threadIdx.x;
int localSize = blockDim.x;
for(unsigned int shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r;
r = rank4(lsb, sMem, numtrue);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem[(r.x & 3) * localSize + (r.x >> 2)] = key.x;
sMem[(r.y & 3) * localSize + (r.y >> 2)] = key.y;
sMem[(r.z & 3) * localSize + (r.z >> 2)] = key.z;
sMem[(r.w & 3) * localSize + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem[localId];
key.y = sMem[localId + localSize];
key.z = sMem[localId + 2 * localSize];
key.w = sMem[localId + 3 * localSize];
__syncthreads();
}
//keysOut[globalId] = key;
reinterpret_cast<uint4*>(keysOut)[globalId] = key;
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of keys> <repeat>\n", argv[0]);
return 1;
}
const int N = atoi(argv[1]); // assume a multiple of 512
const int repeat = atoi(argv[2]);
srand(512);
unsigned int *keys = (unsigned int*) malloc (N * sizeof(unsigned int));
unsigned int *out = (unsigned int*) malloc (N * sizeof(unsigned int));
for (int i = 0; i < N; i++) keys[i] = rand() % 16;
const unsigned int startbit = 0;
const unsigned int nbits = 4;
const unsigned threads = 128; // 1
const unsigned teams = N/4/threads; // 1
unsigned int* d_keys;
cudaMalloc((void**)&d_keys, N*sizeof(unsigned int));
cudaMemcpy(d_keys, keys, N*sizeof(unsigned int), cudaMemcpyHostToDevice);
unsigned int* d_tempKeys;
cudaMalloc((void**)&d_tempKeys, N*sizeof(unsigned int));
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
radixSortBlocksKeysK<<<teams, threads>>>(d_keys, d_tempKeys, nbits, startbit);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / repeat);
cudaMemcpy(out, d_tempKeys, N*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaFree(d_keys);
cudaFree(d_tempKeys);
bool check = verify(out, keys, threads, N);
if (check)
printf("PASS\n");
else
printf("FAIL\n");
free(keys);
free(out);
return 0;
}
|
f2d154f3614c4b79fe3313aeea2d6556fd18ae9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void forwardPathKernel(int noPaths, int nYears, int noSpecies, int noPatches, int noControls, int noUncertainties, float timeStep, float* initPops, float* pops, float*mmm, int* rowIdx, int* elemsPerCol, int maxElems, float* speciesParams, float* caps, float* aars, float* uncertParams, int* controls, float* uJumps, float* uBrownian, float* uJumpSizes, float* uJumpsSpecies, float* uBrownianSpecies, float* uJumpSizesSpecies, float* rgr, float* uResults, float* totalPops) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Only perform matrix multiplication sequentially for now. Later, if so
// desired, we can use dynamic parallelism because the card in the
// machine has CUDA compute capability 3.5
if (idx < noPaths) {
// Initialise the population data at time t=0
for (int ii = 0; ii < noSpecies; ii++) {
float population = 0;
for (int jj = 0; jj < noPatches; jj++) {
pops[idx*(nYears+1)*noSpecies*noPatches + ii*noPatches + jj] =
initPops[jj];
population += pops[idx*(nYears+1)*noSpecies*noPatches + ii*
noPatches + jj];
}
totalPops[idx*(nYears+1)*noSpecies + ii] = population;
// The aars are computed in the next for loop.
}
// Carry over the initial value for all uncertainties
for (int ii = 0; ii < noUncertainties; ii++) {
uResults[idx*noUncertainties*(nYears+1) + ii] = uncertParams[ii*6];
}
float* grMean;
grMean = (float*)malloc(noSpecies*sizeof(float));
for (int ii = 0; ii < noSpecies; ii++) {
grMean[ii] = speciesParams[ii*8];
}
// All future time periods
for (int ii = 0; ii < nYears; ii++) {
// Control to pick
int control = controls[idx*nYears + ii];
for (int jj = 0; jj < noSpecies; jj++) {
totalPops[idx*(nYears+1)*noSpecies + (ii+1)*noSpecies + jj] =
0;
// Adjust the global growth rate mean for this species at this
// time step for this path.
float jump = (uJumpsSpecies[idx*noSpecies*nYears +
ii*noSpecies + jj] < speciesParams[jj*8 + 5]) ?
1.0f : 0.0f;
float meanP = speciesParams[jj*8 + 1];
float reversion = speciesParams[jj*8 + 4];
float brownian = uBrownianSpecies[idx*noSpecies*nYears +
ii*noSpecies + jj]*speciesParams[jj*8 + 2];
float jumpSize = uJumpSizesSpecies[idx*noSpecies*nYears
+ ii*noSpecies + jj]*pow(speciesParams[
jj*8 + 5],2) - pow(speciesParams[jj*8 + 5],2)/2;
grMean[jj] = grMean[jj] + reversion*(meanP - grMean[jj])*
timeStep + grMean[jj]*brownian + (exp(jumpSize) - 1)*
grMean[jj]*jump;
// Initialise temporary populations
float initialPopulation = 0.0f;
for (int kk = 0; kk < noPatches; kk++) {
initialPopulation += pops[idx*(nYears+1)*noSpecies*
noPatches + ii*noSpecies*noPatches + jj*noPatches
+ kk];
}
// For each patch, update the population for the next time
// period by using the movement and mortality matrix for the
// correct species/control combination. We use registers due
// to their considerably lower latency over global memory.
for (int kk = 0; kk < noControls; kk++) {
// Overall population at this time period
float totalPop = 0.0f;
int iterator = 0;
for (int ll = 0; ll < noPatches; ll++) {
// Population for this patch
float population = 0.0f;
// Transfer animals from each destination patch to
// this one for the next period.
for (int mm = 0; mm < elemsPerCol[(jj*noControls + kk)*
noPatches + ll]; mm++) {
float value = pops[idx*(nYears+1)*noSpecies*
noPatches + ii*noSpecies*noPatches + jj*
noPatches + rowIdx[iterator + (jj*
noControls + kk)*maxElems]]*mmm[iterator +
(jj*noControls + kk)*maxElems];
population += value;
iterator++;
}
totalPop += population;
// We only update the actual populations if we are in
// the control that was selected. Save the total
// population for the start of the next time period.
if (kk == control && ii < nYears) {
// Population growth based on a mean-reverting process
rgr[idx*noSpecies*noPatches*nYears + ii*noSpecies*
noPatches + jj*noPatches + ll] = grMean[jj]
+ rgr[idx*noSpecies*noPatches*nYears + ii*
noSpecies*noPatches + jj*noPatches + ll]*
speciesParams[jj*8 + 7];
float gr = rgr[idx*noSpecies*noPatches*nYears + ii*
noSpecies*noPatches + jj*noPatches + ll];
pops[idx*(nYears+1)*noSpecies*noPatches + (ii+1)*
noSpecies*noPatches + jj*noPatches + ll] =
population*(1.0f + gr*(caps[jj*noPatches +
ll] - population)/caps[jj*noPatches + ll]/
100.0);
totalPops[idx*noSpecies*(nYears+1) + (ii+1)*
noSpecies + jj] += pops[idx*(nYears+1)*
noSpecies*noPatches + (ii+1)*noSpecies*
noPatches + jj*noPatches + ll];
}
}
// Save AAR for this control at this time
aars[idx*(nYears+1)*noControls*noSpecies + ii*noControls*
noSpecies + jj*noControls + kk] = totalPop/
initialPopulation;
}
}
// Other uncertainties
for (int jj = 0; jj < noUncertainties; jj++) {
float jump = (uJumps[idx*noUncertainties*nYears +
ii*noUncertainties + jj] < uncertParams[jj*6 + 5]) ?
1.0f : 0.0f;
float curr = uResults[idx*noUncertainties*(nYears+1) +
ii*noUncertainties + jj];
float meanP = uncertParams[jj*6 + 1];
float reversion = uncertParams[jj*6 + 3];
float brownian = uBrownian[idx*noUncertainties*nYears +
ii*noUncertainties + jj]*uncertParams[jj*6 + 2];
float jumpSize = uJumpSizes[idx*noUncertainties*nYears +
ii*noUncertainties + jj]*pow(uncertParams[jj*6 + 4],2)
- pow(uncertParams[jj*6 + 4],2)/2;
// Save the value of the uncertainty for the next time period
uResults[idx*noUncertainties*(nYears+1)+(ii+1)*noUncertainties+jj]
= curr + reversion*(meanP - curr)*timeStep +
curr*brownian + (exp(jumpSize) - 1)*curr*jump;
}
}
free(grMean);
}
} | f2d154f3614c4b79fe3313aeea2d6556fd18ae9b.cu | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void forwardPathKernel(int noPaths, int nYears, int noSpecies, int noPatches, int noControls, int noUncertainties, float timeStep, float* initPops, float* pops, float*mmm, int* rowIdx, int* elemsPerCol, int maxElems, float* speciesParams, float* caps, float* aars, float* uncertParams, int* controls, float* uJumps, float* uBrownian, float* uJumpSizes, float* uJumpsSpecies, float* uBrownianSpecies, float* uJumpSizesSpecies, float* rgr, float* uResults, float* totalPops) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Only perform matrix multiplication sequentially for now. Later, if so
// desired, we can use dynamic parallelism because the card in the
// machine has CUDA compute capability 3.5
if (idx < noPaths) {
// Initialise the population data at time t=0
for (int ii = 0; ii < noSpecies; ii++) {
float population = 0;
for (int jj = 0; jj < noPatches; jj++) {
pops[idx*(nYears+1)*noSpecies*noPatches + ii*noPatches + jj] =
initPops[jj];
population += pops[idx*(nYears+1)*noSpecies*noPatches + ii*
noPatches + jj];
}
totalPops[idx*(nYears+1)*noSpecies + ii] = population;
// The aars are computed in the next for loop.
}
// Carry over the initial value for all uncertainties
for (int ii = 0; ii < noUncertainties; ii++) {
uResults[idx*noUncertainties*(nYears+1) + ii] = uncertParams[ii*6];
}
float* grMean;
grMean = (float*)malloc(noSpecies*sizeof(float));
for (int ii = 0; ii < noSpecies; ii++) {
grMean[ii] = speciesParams[ii*8];
}
// All future time periods
for (int ii = 0; ii < nYears; ii++) {
// Control to pick
int control = controls[idx*nYears + ii];
for (int jj = 0; jj < noSpecies; jj++) {
totalPops[idx*(nYears+1)*noSpecies + (ii+1)*noSpecies + jj] =
0;
// Adjust the global growth rate mean for this species at this
// time step for this path.
float jump = (uJumpsSpecies[idx*noSpecies*nYears +
ii*noSpecies + jj] < speciesParams[jj*8 + 5]) ?
1.0f : 0.0f;
float meanP = speciesParams[jj*8 + 1];
float reversion = speciesParams[jj*8 + 4];
float brownian = uBrownianSpecies[idx*noSpecies*nYears +
ii*noSpecies + jj]*speciesParams[jj*8 + 2];
float jumpSize = uJumpSizesSpecies[idx*noSpecies*nYears
+ ii*noSpecies + jj]*pow(speciesParams[
jj*8 + 5],2) - pow(speciesParams[jj*8 + 5],2)/2;
grMean[jj] = grMean[jj] + reversion*(meanP - grMean[jj])*
timeStep + grMean[jj]*brownian + (exp(jumpSize) - 1)*
grMean[jj]*jump;
// Initialise temporary populations
float initialPopulation = 0.0f;
for (int kk = 0; kk < noPatches; kk++) {
initialPopulation += pops[idx*(nYears+1)*noSpecies*
noPatches + ii*noSpecies*noPatches + jj*noPatches
+ kk];
}
// For each patch, update the population for the next time
// period by using the movement and mortality matrix for the
// correct species/control combination. We use registers due
// to their considerably lower latency over global memory.
for (int kk = 0; kk < noControls; kk++) {
// Overall population at this time period
float totalPop = 0.0f;
int iterator = 0;
for (int ll = 0; ll < noPatches; ll++) {
// Population for this patch
float population = 0.0f;
// Transfer animals from each destination patch to
// this one for the next period.
for (int mm = 0; mm < elemsPerCol[(jj*noControls + kk)*
noPatches + ll]; mm++) {
float value = pops[idx*(nYears+1)*noSpecies*
noPatches + ii*noSpecies*noPatches + jj*
noPatches + rowIdx[iterator + (jj*
noControls + kk)*maxElems]]*mmm[iterator +
(jj*noControls + kk)*maxElems];
population += value;
iterator++;
}
totalPop += population;
// We only update the actual populations if we are in
// the control that was selected. Save the total
// population for the start of the next time period.
if (kk == control && ii < nYears) {
// Population growth based on a mean-reverting process
rgr[idx*noSpecies*noPatches*nYears + ii*noSpecies*
noPatches + jj*noPatches + ll] = grMean[jj]
+ rgr[idx*noSpecies*noPatches*nYears + ii*
noSpecies*noPatches + jj*noPatches + ll]*
speciesParams[jj*8 + 7];
float gr = rgr[idx*noSpecies*noPatches*nYears + ii*
noSpecies*noPatches + jj*noPatches + ll];
pops[idx*(nYears+1)*noSpecies*noPatches + (ii+1)*
noSpecies*noPatches + jj*noPatches + ll] =
population*(1.0f + gr*(caps[jj*noPatches +
ll] - population)/caps[jj*noPatches + ll]/
100.0);
totalPops[idx*noSpecies*(nYears+1) + (ii+1)*
noSpecies + jj] += pops[idx*(nYears+1)*
noSpecies*noPatches + (ii+1)*noSpecies*
noPatches + jj*noPatches + ll];
}
}
// Save AAR for this control at this time
aars[idx*(nYears+1)*noControls*noSpecies + ii*noControls*
noSpecies + jj*noControls + kk] = totalPop/
initialPopulation;
}
}
// Other uncertainties
for (int jj = 0; jj < noUncertainties; jj++) {
float jump = (uJumps[idx*noUncertainties*nYears +
ii*noUncertainties + jj] < uncertParams[jj*6 + 5]) ?
1.0f : 0.0f;
float curr = uResults[idx*noUncertainties*(nYears+1) +
ii*noUncertainties + jj];
float meanP = uncertParams[jj*6 + 1];
float reversion = uncertParams[jj*6 + 3];
float brownian = uBrownian[idx*noUncertainties*nYears +
ii*noUncertainties + jj]*uncertParams[jj*6 + 2];
float jumpSize = uJumpSizes[idx*noUncertainties*nYears +
ii*noUncertainties + jj]*pow(uncertParams[jj*6 + 4],2)
- pow(uncertParams[jj*6 + 4],2)/2;
// Save the value of the uncertainty for the next time period
uResults[idx*noUncertainties*(nYears+1)+(ii+1)*noUncertainties+jj]
= curr + reversion*(meanP - curr)*timeStep +
curr*brownian + (exp(jumpSize) - 1)*curr*jump;
}
}
free(grMean);
}
} |
26872031f470cd050fb1849812d09b88a9023418.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "build_tracks_kernels.h"
#include "reorganize_gplex.h"
#include "kalmanUpdater_kernels.h"
#include "computeChi2_kernels.h"
constexpr int BLOCK_SIZE_X = 16;
__device__ void getHitFromLayer_fn(LayerOfHitsCU& layer_of_hits,
GPlexQI& HitsIdx, GPlexHV& msPar, GPlexHS& msErr, int itrack_plex, int N)
{
if (itrack_plex < N)
{
int hit_idx = HitsIdx[itrack_plex];
if (hit_idx >= 0)
{
Hit &hit = layer_of_hits.m_hits[hit_idx];
GetHitErr(msErr, (char *)hit.errArrayCU(), 0, N);
GetHitPar(msPar, (char *)hit.posArrayCU(), 0, N);
}
}
}
__global__ void getHitFromLayer_kernel(LayerOfHitsCU& layer_of_hits,
GPlexQI HitsIdx, GPlexHV msPar, GPlexHS msErr, int N)
{
int itrack_plex = threadIdx.x + blockDim.x * blockIdx.x;
getHitFromLayer_fn(layer_of_hits, HitsIdx, msPar, msErr, itrack_plex, N);
}
void getHitFromLayer_wrappper( const hipStream_t& stream,
LayerOfHitsCU& layer_cu, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr, int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
hipLaunchKernelGGL(( getHitFromLayer_kernel) , dim3(grid), dim3(block), 0, stream ,
layer_cu, HitsIdx, msPar, msErr, N);
}
__device__ void updateMissingHits_fn(GPlexQI& HitsIdx,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC, int i, int N)
{
if (i < N)
{
if (HitsIdx[i] < 0)
{
two_steps_copy<7> (Err_iC, Err_iP, i);
two_steps_copy<6> (Par_iC, Par_iP, i);
}
}
}
__global__ void updateMissingHits_kernel(GPlexQI HitsIdx,
GPlexLV Par_iP, GPlexLS Err_iP,
GPlexLV Par_iC, GPlexLS Err_iC, int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
updateMissingHits_fn(HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, i, N);
}
void UpdateMissingHits_wrapper(
const hipStream_t& stream, GPlexQI& HitsIdx,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
hipLaunchKernelGGL(( updateMissingHits_kernel) , dim3(grid), dim3(block), 0, stream ,
HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, N);
}
__device__
void UpdateWithLastHit_fn(
LayerOfHitsCU& layer_of_hits, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int itrack_plex, int N)
{
getHitFromLayer_fn(layer_of_hits, HitsIdx, msPar, msErr, itrack_plex, N);
kalmanUpdate_fn(Err_iP, msErr, Par_iP, msPar, Par_iC, Err_iC, itrack_plex, N);
updateMissingHits_fn(HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, itrack_plex, N);
}
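// Composition above, as read from the code: the fused update re-fetches the
// hit selected earlier via its stored index, runs the Kalman update against
// that hit, and finally copies the propagated state/covariance into the
// updated slots for tracks whose HitsIdx is negative (no hit on this layer).
// Because the copy-through runs last, it overrides whatever the updater
// produced from the stale msPar/msErr of a missing-hit track.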
__global__
void UpdateWithLastHit_kernel(
LayerOfHitsCU& layer_of_hits, GPlexQI HitsIdx,
GPlexHV msPar, GPlexHS msErr,
GPlexLV Par_iP, GPlexLS Err_iP,
GPlexLV Par_iC, GPlexLS Err_iC,
int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
UpdateWithLastHit_fn(layer_of_hits, HitsIdx, msPar, msErr,
Par_iP, Err_iP, Par_iC, Err_iC, i, N);
}
void UpdateWithLastHit_wrapper(
const hipStream_t& stream,
LayerOfHitsCU& layer_cu, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
hipLaunchKernelGGL(( UpdateWithLastHit_kernel) , dim3(grid), dim3(block), 0, stream ,
layer_cu, HitsIdx, msPar, msErr,
Par_iP, Err_iP, Par_iC, Err_iC, N);
}
| 26872031f470cd050fb1849812d09b88a9023418.cu | #include "build_tracks_kernels.h"
#include "reorganize_gplex.h"
#include "kalmanUpdater_kernels.h"
#include "computeChi2_kernels.h"
constexpr int BLOCK_SIZE_X = 16;
__device__ void getHitFromLayer_fn(LayerOfHitsCU& layer_of_hits,
GPlexQI& HitsIdx, GPlexHV& msPar, GPlexHS& msErr, int itrack_plex, int N)
{
if (itrack_plex < N)
{
int hit_idx = HitsIdx[itrack_plex];
if (hit_idx >= 0)
{
Hit &hit = layer_of_hits.m_hits[hit_idx];
GetHitErr(msErr, (char *)hit.errArrayCU(), 0, N);
GetHitPar(msPar, (char *)hit.posArrayCU(), 0, N);
}
}
}
__global__ void getHitFromLayer_kernel(LayerOfHitsCU& layer_of_hits,
GPlexQI HitsIdx, GPlexHV msPar, GPlexHS msErr, int N)
{
int itrack_plex = threadIdx.x + blockDim.x * blockIdx.x;
getHitFromLayer_fn(layer_of_hits, HitsIdx, msPar, msErr, itrack_plex, N);
}
void getHitFromLayer_wrappper( const cudaStream_t& stream,
LayerOfHitsCU& layer_cu, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr, int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
getHitFromLayer_kernel <<< grid, block, 0, stream >>>
(layer_cu, HitsIdx, msPar, msErr, N);
}
__device__ void updateMissingHits_fn(GPlexQI& HitsIdx,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC, int i, int N)
{
if (i < N)
{
if (HitsIdx[i] < 0)
{
two_steps_copy<7> (Err_iC, Err_iP, i);
two_steps_copy<6> (Par_iC, Par_iP, i);
}
}
}
__global__ void updateMissingHits_kernel(GPlexQI HitsIdx,
GPlexLV Par_iP, GPlexLS Err_iP,
GPlexLV Par_iC, GPlexLS Err_iC, int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
updateMissingHits_fn(HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, i, N);
}
void UpdateMissingHits_wrapper(
const cudaStream_t& stream, GPlexQI& HitsIdx,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
updateMissingHits_kernel <<< grid, block, 0, stream >>>
(HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, N);
}
__device__
void UpdateWithLastHit_fn(
LayerOfHitsCU& layer_of_hits, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int itrack_plex, int N)
{
getHitFromLayer_fn(layer_of_hits, HitsIdx, msPar, msErr, itrack_plex, N);
kalmanUpdate_fn(Err_iP, msErr, Par_iP, msPar, Par_iC, Err_iC, itrack_plex, N);
updateMissingHits_fn(HitsIdx, Par_iP, Err_iP, Par_iC, Err_iC, itrack_plex, N);
}
__global__
void UpdateWithLastHit_kernel(
LayerOfHitsCU& layer_of_hits, GPlexQI HitsIdx,
GPlexHV msPar, GPlexHS msErr,
GPlexLV Par_iP, GPlexLS Err_iP,
GPlexLV Par_iC, GPlexLS Err_iC,
int N)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
UpdateWithLastHit_fn(layer_of_hits, HitsIdx, msPar, msErr,
Par_iP, Err_iP, Par_iC, Err_iC, i, N);
}
void UpdateWithLastHit_wrapper(
const cudaStream_t& stream,
LayerOfHitsCU& layer_cu, GPlexQI& HitsIdx,
GPlexHV& msPar, GPlexHS& msErr,
GPlexLV& Par_iP, GPlexLS& Err_iP,
GPlexLV& Par_iC, GPlexLS& Err_iC,
int N)
{
int gridx = (N-1)/BLOCK_SIZE_X + 1;
dim3 grid(gridx, 1, 1);
dim3 block(BLOCK_SIZE_X, 1, 1);
UpdateWithLastHit_kernel <<< grid, block, 0, stream >>>
(layer_cu, HitsIdx, msPar, msErr,
Par_iP, Err_iP, Par_iC, Err_iC, N);
}
|
0ca981de779bb6abfa388853b7c0b02d6444cef9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detect_utils.h"
#include "cudaUtility.h"
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px_in = input[ y * width + x ];
T px_out = px_in;
const float fx = x;
const float fy = y;
for( int n=0; n < numDetections; n++ )
{
const Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * color.x + ialph * px_out.x;
px_out.y = alpha * color.y + ialph * px_out.y;
px_out.z = alpha * color.z + ialph * px_out.z;
}
}
output[y * width + x] = px_out;
}
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
hipError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return hipErrorInvalidValue;
// this assumes that the output already has the input image copied to it,
// which if input != output, is done first by detectNet::Detect()
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
hipLaunchKernelGGL(( gpuDetectionOverlayBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
return hipGetLastError();
}
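// Overlay strategy used by this wrapper: one kernel launch per detection, with
// 8x8 blocks tiled over the bounding box only, and each box pixel alpha-blended
// in place,
//
// px_out = alpha*color + (1 - alpha)*px_in, alpha = color.w / 255,
//
// so overlapping detections blend on top of each other in launch order. The
// whole-image kernel gpuDetectionOverlay above, which scans every detection
// for every pixel, is kept in the file but is not launched from here.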
hipError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, Detection* detections, int numDetections, float4* colors )
{
if( format == IMAGE_RGB8 )
return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA8 )
return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGB32F )
return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA32F )
return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
else
return hipErrorInvalidValue;
}
__global__ void gpuDetectionTransfer( float* input, Detection* output, const int numDets, const int entryLength )
{
const int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= numDets) return;
float* objects_n = input + n * entryLength;
output[n].Instance = n;
output[n].ClassID = (uint32_t)objects_n[5];
output[n].Confidence = objects_n[4];
output[n].Left = objects_n[0];
output[n].Top = objects_n[1];
output[n].Right = objects_n[2];
output[n].Bottom = objects_n[3];
}
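// Layout assumed by the transfer kernel (read off the indexing, not a header):
// a flat float array with entryLength floats per detection, of which
// [0..3] = left, top, right, bottom, [4] = confidence and [5] = class id;
// one thread converts one entry into a Detection struct.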
hipError_t cudaDetectionTransfer( float* input, Detection* output, const int numDets, const int entryLength )
{
if( !input || !output )
return hipErrorInvalidValue;
const dim3 blockDim(32);
const dim3 gridDim(iDivUp(numDets, blockDim.x));
hipLaunchKernelGGL(( gpuDetectionTransfer), dim3(gridDim), dim3(blockDim), 0, 0, input, output, numDets, entryLength);
return hipGetLastError();
} | 0ca981de779bb6abfa388853b7c0b02d6444cef9.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "detect_utils.h"
#include "cudaUtility.h"
template<typename T>
__global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, Detection* detections, int numDetections, float4* colors )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width || y >= height )
return;
const T px_in = input[ y * width + x ];
T px_out = px_in;
const float fx = x;
const float fy = y;
for( int n=0; n < numDetections; n++ )
{
const Detection det = detections[n];
// check if this pixel is inside the bounding box
if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom )
{
const float4 color = colors[det.ClassID];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px_out.x = alpha * color.x + ialph * px_out.x;
px_out.y = alpha * color.y + ialph * px_out.y;
px_out.z = alpha * color.z + ialph * px_out.z;
}
}
output[y * width + x] = px_out;
}
template<typename T>
__global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color )
{
const int box_x = blockIdx.x * blockDim.x + threadIdx.x;
const int box_y = blockIdx.y * blockDim.y + threadIdx.y;
if( box_x >= boxWidth || box_y >= boxHeight )
return;
const int x = box_x + x0;
const int y = box_y + y0;
if( x >= imgWidth || y >= imgHeight )
return;
T px = input[ y * imgWidth + x ];
const float alpha = color.w / 255.0f;
const float ialph = 1.0f - alpha;
px.x = alpha * color.x + ialph * px.x;
px.y = alpha * color.y + ialph * px.y;
px.z = alpha * color.z + ialph * px.z;
output[y * imgWidth + x] = px;
}
template<typename T>
cudaError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, Detection* detections, int numDetections, float4* colors )
{
if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors )
return cudaErrorInvalidValue;
// this assumes that the output already has the input image copied to it,
// which if input != output, is done first by detectNet::Detect()
for( int n=0; n < numDetections; n++ )
{
const int boxWidth = (int)detections[n].Width();
const int boxHeight = (int)detections[n].Height();
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y));
gpuDetectionOverlayBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]);
}
return cudaGetLastError();
}
cudaError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, Detection* detections, int numDetections, float4* colors )
{
if( format == IMAGE_RGB8 )
return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA8 )
return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGB32F )
return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors);
else if( format == IMAGE_RGBA32F )
return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors);
else
return cudaErrorInvalidValue;
}
__global__ void gpuDetectionTransfer( float* input, Detection* output, const int numDets, const int entryLength )
{
const int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= numDets) return;
float* objects_n = input + n * entryLength;
output[n].Instance = n;
output[n].ClassID = (uint32_t)objects_n[5];
output[n].Confidence = objects_n[4];
output[n].Left = objects_n[0];
output[n].Top = objects_n[1];
output[n].Right = objects_n[2];
output[n].Bottom = objects_n[3];
}
cudaError_t cudaDetectionTransfer( float* input, Detection* output, const int numDets, const int entryLength )
{
if( !input || !output )
return cudaErrorInvalidValue;
const dim3 blockDim(32);
const dim3 gridDim(iDivUp(numDets, blockDim.x));
gpuDetectionTransfer<<<gridDim, blockDim>>>(input, output, numDets, entryLength);
return cudaGetLastError();
} |
82f583d829b53b9b4ea3d5f3a6a967a9eb8de5cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DSKernels.cuh"
#include <opencv2\opencv.hpp>
//#define KERN_DEB
#define BLOCK_X 16
#define BLOCK_Y 16
// Exchange trick: Morgan McGuire, ShaderX 2008
#define s2(a,b) { unsigned short tmp = a; a = min(a,b); b = max(tmp,b); }
#define mn3(a,b,c) s2(a,b); s2(a,c);
#define mx3(a,b,c) s2(b,c); s2(a,c);
#define mnmx3(a,b,c) mx3(a,b,c); s2(a,b); // 3 exchanges
#define mnmx4(a,b,c,d) s2(a,b); s2(c,d); s2(a,c); s2(b,d); // 4 exchanges
#define mnmx5(a,b,c,d,e) s2(a,b); s2(c,d); mn3(a,c,e); mx3(b,d,e); // 6 exchanges
#define mnmx6(a,b,c,d,e,f) s2(a,d); s2(b,e); s2(c,f); mn3(a,b,c); mx3(d,e,f); // 7 exchanges
#define SMEM(x,y) smem[(x)+1][(y)+1]
#define IN(x,y) d_in[(y)*nx + (x)]
/////////////////////////////////////////////////////////////////////////////Helpers/////////////////////////////////////////////////////////////////////////////
#ifdef KERN_DEB
#include <iostream>
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#endif
#define DIVIDE_UP(a, b) (int)::ceil((float)a / (float)b)
/////////////////////////////////////////////////////////////////////////////Kernels/////////////////////////////////////////////////////////////////////////////
__global__
void census_transform_kernel(hipTextureObject_t input_im, unsigned long long int *output_census, int width, int height){
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
unsigned char ref = tex2D<unsigned char>(input_im, image_col, image_row);
unsigned int sum1 = 0x0000;
sum1 =
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 4) > ref) << 31) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 4) > ref) << 30) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 4) > ref) << 29) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 4) > ref) << 28) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 4) > ref) << 27) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 4) > ref) << 26) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 4) > ref) << 25) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 3) > ref) << 24) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 3) > ref) << 23) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 3) > ref) << 22) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 3) > ref) << 21) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 3) > ref) << 20) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 3) > ref) << 19) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 3) > ref) << 18) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 2) > ref) << 17) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 2) > ref) << 16) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 2) > ref) << 15) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 2) > ref) << 14) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 2) > ref) << 13) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 2) > ref) << 12) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 2) > ref) << 11) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 1) > ref) << 10) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 1) > ref) << 9) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 1) > ref) << 8) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 1) > ref) << 7) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 1) > ref) << 6) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 1) > ref) << 5) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 1) > ref) << 4) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 0) > ref) << 3) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 0) > ref) << 2) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 0) > ref) << 1) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 0) > ref) << 0);
unsigned int sum2 = 0x0000;
sum2 =
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 0) > ref) << 31) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 0) > ref) << 30) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 0) > ref) << 29) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 0) > ref) << 28) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 1) > ref) << 27) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 1) > ref) << 26) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 1) > ref) << 25) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 1) > ref) << 24) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 1) > ref) << 23) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 1) > ref) << 22) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 1) > ref) << 21) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 2) > ref) << 20) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 2) > ref) << 19) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 2) > ref) << 18) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 2) > ref) << 17) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 2) > ref) << 16) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 2) > ref) << 15) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 2) > ref) << 14) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 3) > ref) << 13) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 3) > ref) << 12) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 3) > ref) << 11) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 3) > ref) << 10) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 3) > ref) << 9) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 3) > ref) << 8) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 3) > ref) << 7) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 4) > ref) << 6) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 4) > ref) << 5) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 4) > ref) << 4) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 4) > ref) << 3) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 4) > ref) << 2) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 4) > ref) << 1) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 4) > ref) << 0);
uint2 temp = make_uint2(sum1, sum2);
output_census[image_row * width + image_col] = *reinterpret_cast<unsigned long long int*>(&temp);
}
}
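// Census descriptor produced above: every pixel is compared against a 7x9
// neighbourhood (columns -3..+3, rows -4..+4) and the comparison bits are
// packed into two 32-bit halves stored as one 64-bit value (the centre is
// compared against itself twice, so two bits are always zero). The matching
// cost later reduces to a Hamming distance, __popcll(census_a ^ census_b),
// in the range 0..64. Out-of-range reads rely on whatever addressing mode the
// caller configured on the texture object; clamping is assumed here.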
__global__
void cross_construct_kernel(hipTextureObject_t input_im, uchar4 *output_arm_vol, int arm_length, int max_arm_length, int arm_threshold, int strict_arm_threshold, int width, int height){
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
if (image_row < height && image_col < width){
uchar4 pix_arm = make_uchar4(0, 0, 0, 0);
int ref = tex2D<unsigned char>(input_im, image_col, image_row);
int scan_length, diff_curr_ref, diff_curr_next;
//Upward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row - scan_length));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row - scan_length - 1));
if (!(scan_length < max_arm_length &&
image_row - scan_length > 0 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.x = scan_length;
//Downward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row + scan_length));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row + scan_length + 1));
if (!(scan_length < max_arm_length &&
image_row + scan_length < height - 1 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.y = scan_length;
//Leftward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col - scan_length, image_row));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col - scan_length - 1, image_row));
if (!(scan_length < max_arm_length &&
image_col - scan_length > 0 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.z = scan_length;
//Rightward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col + scan_length, image_row));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col + scan_length + 1, image_row));
if (!(scan_length < max_arm_length &&
image_col + scan_length < width &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.w = scan_length;
pix_arm.x = pix_arm.x == 0 ? (image_row - 2 >= 0 ? 2 : 0) : pix_arm.x;
pix_arm.y = pix_arm.y == 0 ? (image_row + 2 < height ? 2 : 0) : pix_arm.y;
//pix_arm.x = image_row - 2 >= 0 ? 2 : pix_arm.x;
//pix_arm.y = image_row + 2 < height ? 2 : pix_arm.y;
pix_arm.z = pix_arm.z == 0 ? (image_col - 2 >= 0 ? 2 : 0) : pix_arm.z;
pix_arm.w = pix_arm.w == 0 ? (image_col + 2 < width ? 2 : 0) : pix_arm.w;
output_arm_vol[image_row * width + image_col] = pix_arm;
}
}
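// Cross construction above: each pixel grows four arms (up, down, left, right)
// while the intensity difference to the anchor pixel and to the next pixel
// stays under arm_threshold, switching to the stricter strict_arm_threshold
// once the arm is longer than arm_length, and stopping at max_arm_length or
// the image border. Arms that come out empty are widened to 2 pixels when the
// image allows it, so every support region spans at least a few samples for
// the aggregation passes below.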
__global__
void cost_initialization_kernel(unsigned char *left, unsigned char *right, unsigned long long int *left_census, unsigned long long int *right_census, float *cost_vol, float ad_gamma, float census_gamma, bool left_to_right, int width, int height){
extern __shared__ unsigned char temp[];
unsigned char *ref_temp = temp;
unsigned char *targ_temp = &ref_temp[blockDim.x];
unsigned long long int *ref_census_temp = (unsigned long long int*)&targ_temp[blockDim.x * 2];
unsigned long long int *targ_census_temp = &ref_census_temp[blockDim.x];
//Initialize to zero
ref_temp[threadIdx.x] = 0;
targ_temp[threadIdx.x] = 0;
targ_temp[blockDim.x + threadIdx.x] = 0;
ref_census_temp[threadIdx.x] = 0;
targ_census_temp[threadIdx.x] = 0;
targ_census_temp[blockDim.x + threadIdx.x] = 0;
__syncthreads();
int image_row = blockIdx.y;
float cost = 0.0f;
if (image_row < height){
if (left_to_right){
for (int image_col = 0; image_col < width; image_col++){
int block_index = image_col % blockDim.x;
if (block_index == 0){
if (image_col + threadIdx.x < width){
ref_temp[threadIdx.x] = left[image_row * width + image_col + threadIdx.x];
ref_census_temp[threadIdx.x] = left_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + threadIdx.x < width){
targ_temp[blockDim.x + threadIdx.x] = right[image_row * width + image_col + threadIdx.x];
targ_census_temp[blockDim.x + threadIdx.x] = right_census[image_row * width + image_col + threadIdx.x];
}
if ((int)(image_col - blockDim.x + threadIdx.x) >= 0 && (int)(image_col - blockDim.x + threadIdx.x) < width){
targ_temp[threadIdx.x] = right[image_row * width + image_col - blockDim.x + threadIdx.x];
targ_census_temp[threadIdx.x] = right_census[image_row * width + image_col - blockDim.x + threadIdx.x];
}
__syncthreads();
}
float ad_cost, census_cost;
ad_cost = (fabsf(ref_temp[block_index] - targ_temp[blockDim.x + block_index - threadIdx.x]) / 255.0f) * ad_gamma;
census_cost = (__popcll(ref_census_temp[block_index] ^ targ_census_temp[blockDim.x + block_index - threadIdx.x]) / 64.0f) * census_gamma;
cost += ad_cost + census_cost;
cost_vol[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = cost;
}
}
else{
for (int image_col = 0; image_col < width; image_col++){
int block_index = image_col % blockDim.x;
if (block_index == 0){
if (image_col + threadIdx.x < width){
ref_temp[threadIdx.x] = right[image_row * width + image_col + threadIdx.x];
ref_census_temp[threadIdx.x] = right_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + threadIdx.x < width){
targ_temp[threadIdx.x] = left[image_row * width + image_col + threadIdx.x];
targ_census_temp[threadIdx.x] = left_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + blockDim.x + threadIdx.x < width){
targ_temp[blockDim.x + threadIdx.x] = left[image_row * width + image_col + blockDim.x + threadIdx.x];
targ_census_temp[blockDim.x + threadIdx.x] = left_census[image_row * width + image_col + blockDim.x + threadIdx.x];
}
__syncthreads();
}
float ad_cost, census_cost;
ad_cost = (fabsf(ref_temp[block_index] - targ_temp[block_index + threadIdx.x]) / 255.0f) * ad_gamma;
census_cost = (__popcll(ref_census_temp[block_index] ^ targ_census_temp[block_index + threadIdx.x]) / 64.0f) * census_gamma;
cost += ad_cost + census_cost;
cost_vol[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = cost;
}
}
}
}
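// Cost layout produced above: for a fixed row, thread d of the block owns
// disparity d and the value written at column x is the running sum of the
// per-pixel matching cost over columns 0..x,
//
// C(x, d) = sum over x' <= x of [ ad_gamma * |I_ref(x') - I_targ(x' -/+ d)| / 255
// + census_gamma * popcount(c_ref ^ c_targ) / 64 ],
//
// with the sign of the disparity shift depending on the matching direction.
// Storing prefix sums lets the horizontal pass below aggregate any arm as a
// difference of two values instead of re-walking the pixels. The shared-memory
// staging holds blockDim.x reference pixels and 2*blockDim.x target pixels
// (plus the matching census words) so every disparity in the block reads from
// shared memory.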
__global__
void horizontal_aggregation_kernel(float *cost_vol_in, uchar4 *arm_vol, float *cost_vol_out, int width, int height){
int image_col = blockIdx.x;
float sum = 0.0f;
for (int image_row = 0; image_row < height; image_row++){
uchar4 pixel_arm = arm_vol[image_row * width + image_col];
int right_limit = image_col + pixel_arm.w;
int left_limit = image_col - pixel_arm.z - 1;
float aggregate = cost_vol_in[image_row * width * blockDim.x + right_limit * blockDim.x + threadIdx.x];
if (left_limit >= 0)
aggregate -= cost_vol_in[image_row * width * blockDim.x + left_limit * blockDim.x + threadIdx.x];
sum += aggregate;
cost_vol_out[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = sum;
}
}
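// Horizontal aggregation above: because the input volume stores row-wise
// prefix sums, the cost summed over a pixel's horizontal arm is simply
//
// arm_sum = C(col + arm.right, d) - C(col - arm.left - 1, d),
//
// and the value written out is again a running sum, this time down the column,
// so the vertical pass can repeat the same difference trick along y.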
__global__
void vertical_aggregation_kernel(float *cost_vol_in, uchar4 *arm_vol, float *cost_vol_out, unsigned short *disp_im, int width, int height){
int image_row = blockIdx.y;
__shared__ unsigned int reduce_cache[32];
__shared__ float cost_cache[256];
for (int image_col = 0; image_col < width; image_col++){
uchar4 pix_arm = arm_vol[image_row * width + image_col];
int down_lim = image_row + pix_arm.y;
int up_lim = image_row - pix_arm.x - 1;
float aggregate = cost_vol_in[down_lim * width * blockDim.x + image_col * blockDim.x + threadIdx.x];
if (up_lim >= 0)
aggregate -= cost_vol_in[up_lim * width * blockDim.x + image_col * blockDim.x + threadIdx.x];
//cost_vol_out[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = aggregate;
cost_cache[threadIdx.x] = aggregate;
//Find the minimum
unsigned int min_cost = (((unsigned int)(aggregate * 10000)) << 8) | threadIdx.x;
unsigned int temp_min_cost = 0;
int lane = threadIdx.x % 32;
int wid = threadIdx.x / 32;
temp_min_cost = __shfl_down(min_cost, 16);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 8);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 4);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 2);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 1);
min_cost = min(min_cost, temp_min_cost);
if (lane == 0) reduce_cache[wid] = min_cost;
__syncthreads();
min_cost = (threadIdx.x < blockDim.x / 32) ? reduce_cache[lane] : UINT_MAX;
if (wid == 0){
temp_min_cost = __shfl_down(min_cost, 4);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 2);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 1);
min_cost = min(min_cost, temp_min_cost);
}
if (threadIdx.x == 0){
unsigned short disp = (unsigned short)((min_cost & 0x000000FF));
if (disp >= 1 && disp < blockDim.x - 1)
disp_im[image_row * width + image_col] = (unsigned short)((disp + ((cost_cache[disp + 1] - cost_cache[disp - 1]) / (2 * (-cost_cache[disp + 1] - cost_cache[disp - 1] + 2 * cost_cache[disp])))) * 256.0f);
else
disp_im[image_row * width + image_col] = disp << 8;
}
}
}
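// Vertical aggregation and winner-take-all above: the arm sum along y is a
// difference of the column prefix sums, then the minimum-cost disparity is
// found with a warp-shuffle reduction over a packed key,
//
// key = ((unsigned int)(cost * 10000) << 8) | disparity,
//
// so min(key) carries the winning disparity in its low 8 bits. Thread 0 then
// refines the winner with a parabola fit through the two neighbouring costs
// and stores the result in 8.8 fixed point (disparity * 256), which is why
// later stages compare and scale disparities by 256.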
__global__
void consistency_check_kernel(hipTextureObject_t left_disp_im, hipTextureObject_t right_disp_im, unsigned short *output_disp_im, int disparity_tolerance, int width, int height){
int col_to_access = blockIdx.x * blockDim.x + threadIdx.x;
int row_to_access = blockIdx.y * blockDim.y + threadIdx.y;
if (row_to_access < height && col_to_access < width){
unsigned short disp = tex2D<unsigned short>(left_disp_im, col_to_access, row_to_access);
unsigned short to_check = tex2D<unsigned short>(right_disp_im, col_to_access - (disp >> 8), row_to_access);
output_disp_im[row_to_access * width + col_to_access] = (abs(disp - to_check) <= disparity_tolerance * 256) ? disp : OUTLIER;
}
}
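// Left-right check above: the left disparity d at (x, y) is compared with the
// right disparity at (x - d, y); because disparities are stored in 8.8 fixed
// point, the tolerance is scaled by 256 and failures are marked with the
// OUTLIER sentinel so the voting passes below can fill them.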
__global__
void horizontal_voting_kernel(hipTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height){
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
int sums[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int eligible_votes = 0;
int no_of_votes = 0;
//Load arm data
uchar4 pix_arm = arm_vol[image_col + image_row * width];
//Check disp value
int disp_value = tex2D<unsigned short>(input_disp, image_col, image_row);
if (disp_value == OUTLIER){
for (int pix_iter = -pix_arm.z; pix_iter <= pix_arm.w; pix_iter++){
int disp_val = tex2D<unsigned short>(input_disp, image_col + pix_iter, image_row);
if (disp_val != OUTLIER){
sums[0] += ((disp_val & 1) != 0);
sums[1] += ((disp_val & 2) != 0);
sums[2] += ((disp_val & 4) != 0);
sums[3] += ((disp_val & 8) != 0);
sums[4] += ((disp_val & 16) != 0);
sums[5] += ((disp_val & 32) != 0);
sums[6] += ((disp_val & 64) != 0);
sums[7] += ((disp_val & 128) != 0);
sums[8] += ((disp_val & 256) != 0);
sums[9] += ((disp_val & 512) != 0);
sums[10] += ((disp_val & 1024) != 0);
sums[11] += ((disp_val & 2048) != 0);
sums[12] += ((disp_val & 4096) != 0);
sums[13] += ((disp_val & 8192) != 0);
sums[14] += ((disp_val & 16384) != 0);
sums[15] += ((disp_val & 32768) != 0);
eligible_votes++;
}
no_of_votes++;
}
__syncthreads();
int majority = eligible_votes * 0.5;
disp_value = (
((sums[15] > majority) << 15) +
((sums[14] > majority) << 14) +
((sums[13] > majority) << 13) +
((sums[12] > majority) << 12) +
((sums[11] > majority) << 11) +
((sums[10] > majority) << 10) +
((sums[9] > majority) << 9) +
((sums[8] > majority) << 8) +
((sums[7] > majority) << 7) +
((sums[6] > majority) << 6) +
((sums[5] > majority) << 5) +
((sums[4] > majority) << 4) +
((sums[3] > majority) << 3) +
((sums[2] > majority) << 2) +
((sums[1] > majority) << 1) +
((sums[0] > majority) << 0));
disp_value = (eligible_votes > no_of_votes * 0.35f) ? disp_value : OUTLIER;
}
output_disp[image_col + image_row * width] = disp_value;
}
}
__global__
void vertical_voting_kernel(hipTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height){
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
int sums[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int eligible_votes = 0;
int no_of_votes = 0;
//Load arm data
uchar4 pix_arm = arm_vol[image_col + image_row * width];
//Check disp value
int disp_value = tex2D<unsigned short>(input_disp, image_col, image_row);
if (disp_value == OUTLIER){
for (int pix_iter = -pix_arm.x; pix_iter <= pix_arm.y; pix_iter++){
int disp_val = tex2D<unsigned short>(input_disp, image_col, image_row + pix_iter);
if (disp_val != OUTLIER){
sums[0] += ((disp_val & 1) != 0);
sums[1] += ((disp_val & 2) != 0);
sums[2] += ((disp_val & 4) != 0);
sums[3] += ((disp_val & 8) != 0);
sums[4] += ((disp_val & 16) != 0);
sums[5] += ((disp_val & 32) != 0);
sums[6] += ((disp_val & 64) != 0);
sums[7] += ((disp_val & 128) != 0);
sums[8] += ((disp_val & 256) != 0);
sums[9] += ((disp_val & 512) != 0);
sums[10] += ((disp_val & 1024) != 0);
sums[11] += ((disp_val & 2048) != 0);
sums[12] += ((disp_val & 4096) != 0);
sums[13] += ((disp_val & 8192) != 0);
sums[14] += ((disp_val & 16384) != 0);
sums[15] += ((disp_val & 32768) != 0);
eligible_votes++;
}
no_of_votes++;
}
__syncthreads();
int majority = eligible_votes * 0.5;
disp_value = (
((sums[15] > majority) << 15) +
((sums[14] > majority) << 14) +
((sums[13] > majority) << 13) +
((sums[12] > majority) << 12) +
((sums[11] > majority) << 11) +
((sums[10] > majority) << 10) +
((sums[9] > majority) << 9) +
((sums[8] > majority) << 8) +
((sums[7] > majority) << 7) +
((sums[6] > majority) << 6) +
((sums[5] > majority) << 5) +
((sums[4] > majority) << 4) +
((sums[3] > majority) << 3) +
((sums[2] > majority) << 2) +
((sums[1] > majority) << 1) +
((sums[0] > majority) << 0));
disp_value = (eligible_votes > no_of_votes * 0.35f) ? disp_value : OUTLIER;
}
output_disp[image_col + image_row * width] = disp_value;
}
}
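// Region voting (the horizontal pass above and this vertical pass): only
// OUTLIER pixels are filled. Each of the 16 bits of the 8.8 fixed-point
// disparity is voted on independently across the pixel's arm, and the result
// is accepted only when more than 35% of the arm pixels carry a valid
// disparity. Note that bit-wise majority voting is cheap but is not the same
// as taking a median of the valid disparities.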
__global__
void median_filter_kernel(unsigned short *d_in, unsigned short *d_out, int nx, int ny)
{
int tx = threadIdx.x, ty = threadIdx.y;
// guards: is at boundary?
bool is_x_top = (tx == 0), is_x_bot = (tx == BLOCK_X - 1);
bool is_y_top = (ty == 0), is_y_bot = (ty == BLOCK_Y - 1);
__shared__ unsigned short smem[BLOCK_X + 2][BLOCK_Y + 2];
// clear out shared memory (zero padding)
if (is_x_top) SMEM(tx - 1, ty) = 0;
else if (is_x_bot) SMEM(tx + 1, ty) = 0;
if (is_y_top) {
SMEM(tx, ty - 1) = 0;
if (is_x_top) SMEM(tx - 1, ty - 1) = 0;
else if (is_x_bot) SMEM(tx + 1, ty - 1) = 0;
}
else if (is_y_bot) {
SMEM(tx, ty + 1) = 0;
if (is_x_top) SMEM(tx - 1, ty + 1) = 0;
else if (is_x_bot) SMEM(tx + 1, ty + 1) = 0;
}
// guards: is at boundary and still more image?
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
is_x_top &= (x > 0); is_x_bot &= (x < nx - 1);
is_y_top &= (y > 0); is_y_bot &= (y < ny - 1);
// each thread pulls from image
SMEM(tx, ty) = IN(x, y); // self
if (is_x_top) SMEM(tx - 1, ty) = IN(x - 1, y);
else if (is_x_bot) SMEM(tx + 1, ty) = IN(x + 1, y);
if (is_y_top) {
SMEM(tx, ty - 1) = IN(x, y - 1);
if (is_x_top) SMEM(tx - 1, ty - 1) = IN(x - 1, y - 1);
else if (is_x_bot) SMEM(tx + 1, ty - 1) = IN(x + 1, y - 1);
}
else if (is_y_bot) {
SMEM(tx, ty + 1) = IN(x, y + 1);
if (is_x_top) SMEM(tx - 1, ty + 1) = IN(x - 1, y + 1);
else if (is_x_bot) SMEM(tx + 1, ty + 1) = IN(x + 1, y + 1);
}
__syncthreads();
// pull top six from shared memory
unsigned short v[6] = { SMEM(tx - 1, ty - 1), SMEM(tx, ty - 1), SMEM(tx + 1, ty - 1),
SMEM(tx - 1, ty), SMEM(tx, ty), SMEM(tx + 1, ty) };
// with each pass, remove min and max values and add new value
mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx - 1, ty + 1); // add new contestant
mnmx5(v[1], v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx, ty + 1);
mnmx4(v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx + 1, ty + 1);
mnmx3(v[3], v[4], v[5]);
// pick the middle one
d_out[y*nx + x] = v[4];
}
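// Median filter above: a 3x3 median computed with the forgetful min/max
// exchange network (the mnmx6..mnmx3 macros at the top of the file): start
// from six samples, repeatedly drop the running min and max while feeding in
// the next sample, and the value left in the middle is the median. The tile is
// staged in shared memory with a one-pixel halo, zero-padded at the image
// border. The host wrapper below launches width/BLOCK_X by height/BLOCK_Y
// blocks, which assumes the image dimensions are multiples of 16.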
/////////////////////////////////////////////////////////////////////////////Stubs/////////////////////////////////////////////////////////////////////////////
void census_transform(hipTextureObject_t input_im, unsigned long long int *output_census, int width, int height, hipStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
census_transform_kernel << <blocks, threads >> >(input_im, output_census, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Census transform failed.");
#endif
}
void cross_construct(hipTextureObject_t input_im, uchar4 *arm_vol, int arm_length, int max_arm_length, int arm_threshold, int strict_arm_threshold, int width, int height, hipStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
cross_construct_kernel << <blocks, threads >> >(input_im, arm_vol, arm_length, max_arm_length, arm_threshold, strict_arm_threshold, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Cross construct failed.");
#endif
}
void match(unsigned char *left, unsigned char *right,
unsigned long long int *left_census, unsigned long long int *right_census, float *cost_vol_temp_a, float *cost_vol_temp_b, uchar4 *arm_vol,
unsigned short *disp_im, float gamma, float census_gamma, bool left_to_right, int width, int height, int max_disparity, hipStream_t stream){
dim3 b(1, height); dim3 t(max_disparity);
size_t mem_sz = t.x * (sizeof(unsigned long long int) + sizeof(unsigned char)) * 3;
cost_initialization_kernel << <b, t, mem_sz >> >(left, right, left_census, right_census, (float*)cost_vol_temp_a, gamma, census_gamma, left_to_right, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Cost initialization failed.");
#endif
b = dim3(width);
t = dim3(max_disparity);
horizontal_aggregation_kernel << < b, t >> > ((float*)cost_vol_temp_a, arm_vol, (float*)cost_vol_temp_b, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Horizontal aggregation failed.");
#endif
b = dim3(1, height);
t = dim3(max_disparity);
vertical_aggregation_kernel << < b, t >> > ((float*)cost_vol_temp_b, arm_vol, (float*)cost_vol_temp_a, disp_im, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Vertical aggregation failed.");
#endif
}
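// A sketch of the overall call order, inferred from the wrappers in this file
// rather than from any driver code (allocations, texture objects and streams
// are the caller's responsibility, so treat this as an assumption):
//
// census_transform(left_tex, d_left_census, w, h, stream);
// census_transform(right_tex, d_right_census, w, h, stream);
// cross_construct(left_tex, d_arm_vol, ..., w, h, stream);
// match(..., /*left_to_right=*/true, ...); // left disparity
// match(..., /*left_to_right=*/false, ...); // right disparity
// check_consistency(left_disp_tex, right_disp_tex, d_disp, tol, w, h, stream);
// horizontal_voting(...); vertical_voting(...); // fill outliers
// median_filter(d_disp, d_disp_filtered, w, h);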
void check_consistency(hipTextureObject_t left_disp_im, hipTextureObject_t right_disp_im, unsigned short *output_disp_im, int disparity_tolerance, int width, int height, hipStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
consistency_check_kernel << <blocks, threads >> >(left_disp_im, right_disp_im, output_disp_im, disparity_tolerance, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Consistency check failed.");
#endif
}
void horizontal_voting(hipTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height, hipStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
horizontal_voting_kernel << <blocks, threads >> >(input_disp, arm_vol, output_disp, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Horizontal voting failed.");
#endif
}
void vertical_voting(hipTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height, hipStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
vertical_voting_kernel << <blocks, threads >> >(input_disp, arm_vol, output_disp, width, height);
#ifdef KERN_DEB
SAFE_CALL(hipDeviceSynchronize(), "Vertical voting failed.");
#endif
}
void median_filter(unsigned short *input_disp, unsigned short *output_disp, int width, int height){
dim3 blocks(width / BLOCK_X, height / BLOCK_Y);
dim3 threads(BLOCK_X, BLOCK_Y);
median_filter_kernel << <blocks, threads >> >(input_disp, output_disp, width, height);
}
| 82f583d829b53b9b4ea3d5f3a6a967a9eb8de5cf.cu | #include "DSKernels.cuh"
#include <opencv2\opencv.hpp>
//#define KERN_DEB
#define BLOCK_X 16
#define BLOCK_Y 16
// Exchange trick: Morgan McGuire, ShaderX 2008
#define s2(a,b) { unsigned short tmp = a; a = min(a,b); b = max(tmp,b); }
#define mn3(a,b,c) s2(a,b); s2(a,c);
#define mx3(a,b,c) s2(b,c); s2(a,c);
#define mnmx3(a,b,c) mx3(a,b,c); s2(a,b); // 3 exchanges
#define mnmx4(a,b,c,d) s2(a,b); s2(c,d); s2(a,c); s2(b,d); // 4 exchanges
#define mnmx5(a,b,c,d,e) s2(a,b); s2(c,d); mn3(a,c,e); mx3(b,d,e); // 6 exchanges
#define mnmx6(a,b,c,d,e,f) s2(a,d); s2(b,e); s2(c,f); mn3(a,b,c); mx3(d,e,f); // 7 exchanges
#define SMEM(x,y) smem[(x)+1][(y)+1]
#define IN(x,y) d_in[(y)*nx + (x)]
/////////////////////////////////////////////////////////////////////////////Helpers/////////////////////////////////////////////////////////////////////////////
#ifdef KERN_DEB
#include <iostream>
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#endif
#define DIVIDE_UP(a, b) (int)std::ceil((float)a / (float)b)
/////////////////////////////////////////////////////////////////////////////Kernels/////////////////////////////////////////////////////////////////////////////
__global__
void census_transform_kernel(cudaTextureObject_t input_im, unsigned long long int *output_census, int width, int height){
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
unsigned char ref = tex2D<unsigned char>(input_im, image_col, image_row);
unsigned int sum1 = 0x0000;
sum1 =
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 4) > ref) << 31) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 4) > ref) << 30) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 4) > ref) << 29) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 4) > ref) << 28) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 4) > ref) << 27) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 4) > ref) << 26) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 4) > ref) << 25) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 3) > ref) << 24) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 3) > ref) << 23) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 3) > ref) << 22) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 3) > ref) << 21) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 3) > ref) << 20) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 3) > ref) << 19) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 3) > ref) << 18) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 2) > ref) << 17) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 2) > ref) << 16) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 2) > ref) << 15) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 2) > ref) << 14) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 2) > ref) << 13) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 2) > ref) << 12) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 2) > ref) << 11) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 1) > ref) << 10) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 1) > ref) << 9) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 1) > ref) << 8) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 1) > ref) << 7) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row - 1) > ref) << 6) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row - 1) > ref) << 5) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row - 1) > ref) << 4) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row - 0) > ref) << 3) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row - 0) > ref) << 2) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row - 0) > ref) << 1) |
((tex2D<unsigned char>(input_im, image_col - 0, image_row - 0) > ref) << 0);
unsigned int sum2 = 0x0000;
sum2 =
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 0) > ref) << 31) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 0) > ref) << 30) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 0) > ref) << 29) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 0) > ref) << 28) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 1) > ref) << 27) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 1) > ref) << 26) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 1) > ref) << 25) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 1) > ref) << 24) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 1) > ref) << 23) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 1) > ref) << 22) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 1) > ref) << 21) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 2) > ref) << 20) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 2) > ref) << 19) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 2) > ref) << 18) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 2) > ref) << 17) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 2) > ref) << 16) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 2) > ref) << 15) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 2) > ref) << 14) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 3) > ref) << 13) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 3) > ref) << 12) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 3) > ref) << 11) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 3) > ref) << 10) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 3) > ref) << 9) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 3) > ref) << 8) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 3) > ref) << 7) |
((tex2D<unsigned char>(input_im, image_col - 3, image_row + 4) > ref) << 6) |
((tex2D<unsigned char>(input_im, image_col - 2, image_row + 4) > ref) << 5) |
((tex2D<unsigned char>(input_im, image_col - 1, image_row + 4) > ref) << 4) |
((tex2D<unsigned char>(input_im, image_col + 0, image_row + 4) > ref) << 3) |
((tex2D<unsigned char>(input_im, image_col + 1, image_row + 4) > ref) << 2) |
((tex2D<unsigned char>(input_im, image_col + 2, image_row + 4) > ref) << 1) |
((tex2D<unsigned char>(input_im, image_col + 3, image_row + 4) > ref) << 0);
uint2 temp = make_uint2(sum1, sum2);
output_census[image_row * width + image_col] = *reinterpret_cast<unsigned long long int*>(&temp);
}
}
__global__
void cross_construct_kernel(cudaTextureObject_t input_im, uchar4 *output_arm_vol, int arm_length, int max_arm_length, int arm_threshold, int strict_arm_threshold, int width, int height){
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
if (image_row < height && image_col < width){
uchar4 pix_arm = make_uchar4(0, 0, 0, 0);
int ref = tex2D<unsigned char>(input_im, image_col, image_row);
int scan_length, diff_curr_ref, diff_curr_next;
//Upward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row - scan_length));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row - scan_length - 1));
if (!(scan_length < max_arm_length &&
image_row - scan_length > 0 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.x = scan_length;
//Downward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row + scan_length));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col, image_row + scan_length + 1));
if (!(scan_length < max_arm_length &&
image_row + scan_length < height - 1 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.y = scan_length;
//Leftward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col - scan_length, image_row));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col - scan_length - 1, image_row));
if (!(scan_length < max_arm_length &&
image_col - scan_length > 0 &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.z = scan_length;
//Rightward scan
scan_length = 0; diff_curr_ref = 0; diff_curr_next = 0;
while (true)
{
diff_curr_ref = abs(ref - tex2D<unsigned char>(input_im, image_col + scan_length, image_row));
diff_curr_next = abs(ref - tex2D<unsigned char>(input_im, image_col + scan_length + 1, image_row));
if (!(scan_length < max_arm_length &&
image_col + scan_length < width &&
diff_curr_ref <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold) &&
diff_curr_next <= (arm_length < scan_length ? strict_arm_threshold : arm_threshold))) break;
scan_length++;
}
pix_arm.w = scan_length;
pix_arm.x = pix_arm.x == 0 ? (image_row - 2 >= 0 ? 2 : 0) : pix_arm.x;
pix_arm.y = pix_arm.y == 0 ? (image_row + 2 < height ? 2 : 0) : pix_arm.y;
//pix_arm.x = image_row - 2 >= 0 ? 2 : pix_arm.x;
//pix_arm.y = image_row + 2 < height ? 2 : pix_arm.y;
pix_arm.z = pix_arm.z == 0 ? (image_col - 2 >= 0 ? 2 : 0) : pix_arm.z;
pix_arm.w = pix_arm.w == 0 ? (image_col + 2 < width ? 2 : 0) : pix_arm.w;
ouput_arm_vol[image_row * width + image_col] = pix_arm;
}
}
__global__
void cost_initialization_kernel(unsigned char *left, unsigned char *right, unsigned long long int *left_census, unsigned long long int *right_census, float *cost_vol, float ad_gamma, float census_gamma, bool left_to_right, int width, int height){
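// Per-scanline AD-Census cost initialization: one block per image row, one thread per disparity.
// Shared memory caches a blockDim.x-wide window of reference pixels/census codes and a
// 2*blockDim.x-wide window of target pixels/census codes, offset so that thread d compares the
// reference pixel with the target pixel displaced by disparity d. The matching cost is a
// normalized absolute intensity difference (weighted by ad_gamma) plus a normalized Hamming
// distance between census codes (weighted by census_gamma). Costs are accumulated along the row
// (prefix sum over image_col), so the horizontal aggregation pass can form arm sums with a single
// subtraction. The volume is laid out as cost_vol[row][col][disparity].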
extern __shared__ unsigned char temp[];
unsigned char *ref_temp = temp;
unsigned char *targ_temp = &ref_temp[blockDim.x];
unsigned long long int *ref_census_temp = (unsigned long long int*)&targ_temp[blockDim.x * 2];
unsigned long long int *targ_census_temp = &ref_census_temp[blockDim.x];
//Initialize to zero
ref_temp[threadIdx.x] = 0;
targ_temp[threadIdx.x] = 0;
targ_temp[blockDim.x + threadIdx.x] = 0;
ref_census_temp[threadIdx.x] = 0;
targ_census_temp[threadIdx.x] = 0;
targ_census_temp[blockDim.x + threadIdx.x] = 0;
__syncthreads();
int image_row = blockIdx.y;
float cost = 0.0f;
if (image_row < height){
if (left_to_right){
for (int image_col = 0; image_col < width; image_col++){
int block_index = image_col % blockDim.x;
if (block_index == 0){
if (image_col + threadIdx.x < width){
ref_temp[threadIdx.x] = left[image_row * width + image_col + threadIdx.x];
ref_census_temp[threadIdx.x] = left_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + threadIdx.x < width){
targ_temp[blockDim.x + threadIdx.x] = right[image_row * width + image_col + threadIdx.x];
targ_census_temp[blockDim.x + threadIdx.x] = right_census[image_row * width + image_col + threadIdx.x];
}
if ((int)(image_col - blockDim.x + threadIdx.x) >= 0 && (int)(image_col - blockDim.x + threadIdx.x) < width){
targ_temp[threadIdx.x] = right[image_row * width + image_col - blockDim.x + threadIdx.x];
targ_census_temp[threadIdx.x] = right_census[image_row * width + image_col - blockDim.x + threadIdx.x];
}
__syncthreads();
}
float ad_cost, census_cost;
ad_cost = (fabsf(ref_temp[block_index] - targ_temp[blockDim.x + block_index - threadIdx.x]) / 255.0f) * ad_gamma;
census_cost = (__popcll(ref_census_temp[block_index] ^ targ_census_temp[blockDim.x + block_index - threadIdx.x]) / 64.0f) * census_gamma;
cost += ad_cost + census_cost;
cost_vol[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = cost;
}
}
else{
for (int image_col = 0; image_col < width; image_col++){
int block_index = image_col % blockDim.x;
if (block_index == 0){
if (image_col + threadIdx.x < width){
ref_temp[threadIdx.x] = right[image_row * width + image_col + threadIdx.x];
ref_census_temp[threadIdx.x] = right_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + threadIdx.x < width){
targ_temp[threadIdx.x] = left[image_row * width + image_col + threadIdx.x];
targ_census_temp[threadIdx.x] = left_census[image_row * width + image_col + threadIdx.x];
}
if (image_col + blockDim.x + threadIdx.x < width){
targ_temp[blockDim.x + threadIdx.x] = left[image_row * width + image_col + blockDim.x + threadIdx.x];
targ_census_temp[blockDim.x + threadIdx.x] = left_census[image_row * width + image_col + blockDim.x + threadIdx.x];
}
__syncthreads();
}
float ad_cost, census_cost;
ad_cost = (fabsf(ref_temp[block_index] - targ_temp[block_index + threadIdx.x]) / 255.0f) * ad_gamma;
census_cost = (__popcll(ref_census_temp[block_index] ^ targ_census_temp[block_index + threadIdx.x]) / 64.0f) * census_gamma;
cost += ad_cost + census_cost;
cost_vol[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = cost;
}
}
}
}
__global__
void horizontal_aggregation_kernel(float *cost_vol_in, uchar4 *arm_vol, float *cost_vol_out, int width, int height){
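// Horizontal pass of the cross-based aggregation: the input volume holds per-row prefix sums of
// the raw costs, so the sum over a pixel's horizontal arm is the prefix value at the right arm
// tip minus the value just beyond the left arm tip. The result is accumulated down each column
// (prefix sum over image_row) to prepare the vertical pass. One block per column, one thread per disparity.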
int image_col = blockIdx.x;
float sum = 0.0f;
for (int image_row = 0; image_row < height; image_row++){
uchar4 pixel_arm = arm_vol[image_row * width + image_col];
int right_limit = image_col + pixel_arm.w;
int left_limit = image_col - pixel_arm.z - 1;
float aggregate = cost_vol_in[image_row * width * blockDim.x + right_limit * blockDim.x + threadIdx.x];
if (left_limit >= 0)
aggregate -= cost_vol_in[image_row * width * blockDim.x + left_limit * blockDim.x + threadIdx.x];
sum += aggregate;
cost_vol_out[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = sum;
}
}
__global__
void vertical_aggregation_kernel(float *cost_vol_in, uchar4 *arm_vol, float *cost_vol_out, unsigned short *disp_im, int width, int height){
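// Vertical pass plus winner-take-all: the column prefix sums are differenced at the vertical arm
// tips to complete the cross aggregation. Each thread holds the aggregated cost of one disparity;
// the minimum is found with warp shuffles by packing (cost * 10000) into the upper bits and the
// disparity into the lowest 8 bits, so an unsigned min() also selects the index. Thread 0 refines
// the winning disparity with a parabolic fit over its neighbours and stores it in 8.8 fixed point.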
int image_row = blockIdx.y;
__shared__ unsigned int reduce_cache[32];
__shared__ float cost_cache[256];
for (int image_col = 0; image_col < width; image_col++){
uchar4 pix_arm = arm_vol[image_row * width + image_col];
int down_lim = image_row + pix_arm.y;
int up_lim = image_row - pix_arm.x - 1;
float aggregate = cost_vol_in[down_lim * width * blockDim.x + image_col * blockDim.x + threadIdx.x];
if (up_lim >= 0)
aggregate -= cost_vol_in[up_lim * width * blockDim.x + image_col * blockDim.x + threadIdx.x];
//cost_vol_out[image_row * width * blockDim.x + image_col * blockDim.x + threadIdx.x] = aggregate;
cost_cache[threadIdx.x] = aggregate;
//Find the minimum
unsigned int min_cost = (((unsigned int)(aggregate * 10000)) << 8) | threadIdx.x;
unsigned int temp_min_cost = 0;
int lane = threadIdx.x % 32;
int wid = threadIdx.x / 32;
temp_min_cost = __shfl_down(min_cost, 16);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 8);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 4);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 2);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 1);
min_cost = min(min_cost, temp_min_cost);
if (lane == 0) reduce_cache[wid] = min_cost;
__syncthreads();
min_cost = (threadIdx.x < blockDim.x / 32) ? reduce_cache[lane] : UINT_MAX;
if (wid == 0){
temp_min_cost = __shfl_down(min_cost, 4);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 2);
min_cost = min(min_cost, temp_min_cost);
temp_min_cost = __shfl_down(min_cost, 1);
min_cost = min(min_cost, temp_min_cost);
}
if (threadIdx.x == 0){
unsigned short disp = (unsigned short)((min_cost & 0x000000FF));
if (disp >= 1 && disp < blockDim.x - 1)
disp_im[image_row * width + image_col] = (unsigned short)((disp + ((cost_cache[disp + 1] - cost_cache[disp - 1]) / (2 * (-cost_cache[disp + 1] - cost_cache[disp - 1] + 2 * cost_cache[disp])))) * 256.0f);
else
disp_im[image_row * width + image_col] = disp << 8;
}
}
}
__global__
void consistency_check_kernel(cudaTextureObject_t left_disp_im, cudaTextureObject_t right_disp_im, unsigned short *output_disp_im, int disparity_tolerance, int width, int height){
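// Left-right consistency check: disparities are stored in 8.8 fixed point, so the integer pixel
// offset is (disp >> 8) and the tolerance is scaled by 256; pixels whose left and right
// disparities disagree by more than the tolerance are marked OUTLIER.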
int col_to_access = blockIdx.x * blockDim.x + threadIdx.x;
int row_to_access = blockIdx.y * blockDim.y + threadIdx.y;
if (row_to_access < height && col_to_access < width){
unsigned short disp = tex2D<unsigned short>(left_disp_im, col_to_access, row_to_access);
unsigned short to_check = tex2D<unsigned short>(right_disp_im, col_to_access - (disp >> 8), row_to_access);
output_disp_im[row_to_access * width + col_to_access] = (abs(disp - to_check) <= disparity_tolerance * 256) ? disp : OUTLIER;
}
}
__global__
void horizontal_voting_kernel(cudaTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height){
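// Outlier filling by bit-plane majority voting over the horizontal cross arm: for an OUTLIER
// pixel, each of the 16 bits of the fixed-point disparity is set if more than half of the
// eligible (non-outlier) arm pixels have that bit set; the vote is kept only if more than 35%
// of the arm pixels were eligible.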
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
int sums[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int eligible_votes = 0;
int no_of_votes = 0;
//Load arm data
uchar4 pix_arm = arm_vol[image_col + image_row * width];
//Check disp value
int disp_value = tex2D<unsigned short>(input_disp, image_col, image_row);
if (disp_value == OUTLIER){
for (int pix_iter = -pix_arm.z; pix_iter <= pix_arm.w; pix_iter++){
int disp_val = tex2D<unsigned short>(input_disp, image_col + pix_iter, image_row);
if (disp_val != OUTLIER){
sums[0] += ((disp_val & 1) != 0);
sums[1] += ((disp_val & 2) != 0);
sums[2] += ((disp_val & 4) != 0);
sums[3] += ((disp_val & 8) != 0);
sums[4] += ((disp_val & 16) != 0);
sums[5] += ((disp_val & 32) != 0);
sums[6] += ((disp_val & 64) != 0);
sums[7] += ((disp_val & 128) != 0);
sums[8] += ((disp_val & 256) != 0);
sums[9] += ((disp_val & 512) != 0);
sums[10] += ((disp_val & 1024) != 0);
sums[11] += ((disp_val & 2048) != 0);
sums[12] += ((disp_val & 4096) != 0);
sums[13] += ((disp_val & 8192) != 0);
sums[14] += ((disp_val & 16384) != 0);
sums[15] += ((disp_val & 32768) != 0);
eligible_votes++;
}
no_of_votes++;
}
__syncthreads();
int majority = eligible_votes * 0.5;
disp_value = (
((sums[15] > majority) << 15) +
((sums[14] > majority) << 14) +
((sums[13] > majority) << 13) +
((sums[12] > majority) << 12) +
((sums[11] > majority) << 11) +
((sums[10] > majority) << 10) +
((sums[9] > majority) << 9) +
((sums[8] > majority) << 8) +
((sums[7] > majority) << 7) +
((sums[6] > majority) << 6) +
((sums[5] > majority) << 5) +
((sums[4] > majority) << 4) +
((sums[3] > majority) << 3) +
((sums[2] > majority) << 2) +
((sums[1] > majority) << 1) +
((sums[0] > majority) << 0));
disp_value = (eligible_votes > no_of_votes * 0.35f) ? disp_value : OUTLIER;
}
output_disp[image_col + image_row * width] = disp_value;
}
}
__global__
void vertical_voting_kernel(cudaTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height){
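// Same bit-plane majority voting as the horizontal pass, but over the vertical cross arm.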
int image_row = blockIdx.y * blockDim.y + threadIdx.y;
int image_col = blockIdx.x * blockDim.x + threadIdx.x;
if (image_row < height && image_col < width){
int sums[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int eligible_votes = 0;
int no_of_votes = 0;
//Load arm data
uchar4 pix_arm = arm_vol[image_col + image_row * width];
//Check disp value
int disp_value = tex2D<unsigned short>(input_disp, image_col, image_row);
if (disp_value == OUTLIER){
for (int pix_iter = -pix_arm.x; pix_iter <= pix_arm.y; pix_iter++){
int disp_val = tex2D<unsigned short>(input_disp, image_col, image_row + pix_iter);
if (disp_val != OUTLIER){
sums[0] += ((disp_val & 1) != 0);
sums[1] += ((disp_val & 2) != 0);
sums[2] += ((disp_val & 4) != 0);
sums[3] += ((disp_val & 8) != 0);
sums[4] += ((disp_val & 16) != 0);
sums[5] += ((disp_val & 32) != 0);
sums[6] += ((disp_val & 64) != 0);
sums[7] += ((disp_val & 128) != 0);
sums[8] += ((disp_val & 256) != 0);
sums[9] += ((disp_val & 512) != 0);
sums[10] += ((disp_val & 1024) != 0);
sums[11] += ((disp_val & 2048) != 0);
sums[12] += ((disp_val & 4096) != 0);
sums[13] += ((disp_val & 8192) != 0);
sums[14] += ((disp_val & 16384) != 0);
sums[15] += ((disp_val & 32768) != 0);
eligible_votes++;
}
no_of_votes++;
}
__syncthreads();
int majority = eligible_votes * 0.5;
disp_value = (
((sums[15] > majority) << 15) +
((sums[14] > majority) << 14) +
((sums[13] > majority) << 13) +
((sums[12] > majority) << 12) +
((sums[11] > majority) << 11) +
((sums[10] > majority) << 10) +
((sums[9] > majority) << 9) +
((sums[8] > majority) << 8) +
((sums[7] > majority) << 7) +
((sums[6] > majority) << 6) +
((sums[5] > majority) << 5) +
((sums[4] > majority) << 4) +
((sums[3] > majority) << 3) +
((sums[2] > majority) << 2) +
((sums[1] > majority) << 1) +
((sums[0] > majority) << 0));
disp_value = (eligible_votes > no_of_votes * 0.35f) ? disp_value : OUTLIER;
}
output_disp[image_col + image_row * width] = disp_value;
}
}
__global__
void median_filter_kernel(unsigned short *d_in, unsigned short *d_out, int nx, int ny)
{
int tx = threadIdx.x, ty = threadIdx.y;
// guards: is at boundary?
bool is_x_top = (tx == 0), is_x_bot = (tx == BLOCK_X - 1);
bool is_y_top = (ty == 0), is_y_bot = (ty == BLOCK_Y - 1);
__shared__ unsigned short smem[BLOCK_X + 2][BLOCK_Y + 2];
// clear out shared memory (zero padding)
if (is_x_top) SMEM(tx - 1, ty) = 0;
else if (is_x_bot) SMEM(tx + 1, ty) = 0;
if (is_y_top) {
SMEM(tx, ty - 1) = 0;
if (is_x_top) SMEM(tx - 1, ty - 1) = 0;
else if (is_x_bot) SMEM(tx + 1, ty - 1) = 0;
}
else if (is_y_bot) {
SMEM(tx, ty + 1) = 0;
if (is_x_top) SMEM(tx - 1, ty + 1) = 0;
else if (is_x_bot) SMEM(tx + 1, ty + 1) = 0;
}
// guards: is at boundary and still more image?
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
is_x_top &= (x > 0); is_x_bot &= (x < nx - 1);
is_y_top &= (y > 0); is_y_bot &= (y < ny - 1);
// each thread pulls from image
SMEM(tx, ty) = IN(x, y); // self
if (is_x_top) SMEM(tx - 1, ty) = IN(x - 1, y);
else if (is_x_bot) SMEM(tx + 1, ty) = IN(x + 1, y);
if (is_y_top) {
SMEM(tx, ty - 1) = IN(x, y - 1);
if (is_x_top) SMEM(tx - 1, ty - 1) = IN(x - 1, y - 1);
else if (is_x_bot) SMEM(tx + 1, ty - 1) = IN(x + 1, y - 1);
}
else if (is_y_bot) {
SMEM(tx, ty + 1) = IN(x, y + 1);
if (is_x_top) SMEM(tx - 1, ty + 1) = IN(x - 1, y + 1);
else if (is_x_bot) SMEM(tx + 1, ty + 1) = IN(x + 1, y + 1);
}
__syncthreads();
// pull top six from shared memory
unsigned short v[6] = { SMEM(tx - 1, ty - 1), SMEM(tx, ty - 1), SMEM(tx + 1, ty - 1),
SMEM(tx - 1, ty), SMEM(tx, ty), SMEM(tx + 1, ty) };
// with each pass, remove min and max values and add new value
mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx - 1, ty + 1); // add new contestant
mnmx5(v[1], v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx, ty + 1);
mnmx4(v[2], v[3], v[4], v[5]);
v[5] = SMEM(tx + 1, ty + 1);
mnmx3(v[3], v[4], v[5]);
// pick the middle one
d_out[y*nx + x] = v[4];
}
/////////////////////////////////////////////////////////////////////////////Stubs/////////////////////////////////////////////////////////////////////////////
void census_transform(cudaTextureObject_t input_im, unsigned long long int *output_census, int width, int height, cudaStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
census_transform_kernel << <blocks, threads >> >(input_im, output_census, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Census transform failed.");
#endif
}
void cross_construct(cudaTextureObject_t input_im, uchar4 *arm_vol, int arm_length, int max_arm_length, int arm_threshold, int strict_arm_threshold, int width, int height, cudaStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
cross_construct_kernel << <blocks, threads >> >(input_im, arm_vol, arm_length, max_arm_length, arm_threshold, strict_arm_threshold, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Cross construct failed.");
#endif
}
void match(unsigned char *left, unsigned char *right,
unsigned long long int *left_census, unsigned long long int *right_census, float *cost_vol_temp_a, float *cost_vol_temp_b, uchar4 *arm_vol,
unsigned short *disp_im, float gamma, float census_gamma, bool left_to_right, int width, int height, int max_disparity, cudaStream_t stream){
dim3 b(1, height); dim3 t(max_disparity);
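// Dynamic shared memory per block: for each of the max_disparity threads, 1 reference pixel +
// 2 target pixels (1 byte each) and 1 reference census code + 2 target census codes (8 bytes each),
// i.e. 3 * (1 + 8) bytes per thread.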
size_t mem_sz = t.x * (sizeof(unsigned long long int) + sizeof(unsigned char)) * 3;
cost_initialization_kernel << <b, t, mem_sz >> >(left, right, left_census, right_census, (float*)cost_vol_temp_a, gamma, census_gamma, left_to_right, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Cost initialization failed.");
#endif
b = dim3(width);
t = dim3(max_disparity);
horizontal_aggregation_kernel << < b, t >> > ((float*)cost_vol_temp_a, arm_vol, (float*)cost_vol_temp_b, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Horizontal aggregation failed.");
#endif
b = dim3(1, height);
t = dim3(max_disparity);
vertical_aggregation_kernel << < b, t >> > ((float*)cost_vol_temp_b, arm_vol, (float*)cost_vol_temp_a, disp_im, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Vertical aggregation failed.");
#endif
}
void check_consistency(cudaTextureObject_t left_disp_im, cudaTextureObject_t right_disp_im, unsigned short *output_disp_im, int disparity_tolerance, int width, int height, cudaStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
consistency_check_kernel << <blocks, threads >> >(left_disp_im, right_disp_im, output_disp_im, disparity_tolerance, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Consistency check failed.");
#endif
}
void horizontal_voting(cudaTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height, cudaStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
horizontal_voting_kernel << <blocks, threads >> >(input_disp, arm_vol, output_disp, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Horizontal voting failed.");
#endif
}
void vertical_voting(cudaTextureObject_t input_disp, uchar4 *arm_vol, unsigned short *output_disp, int width, int height, cudaStream_t stream){
dim3 threads(16, 16);
dim3 blocks(DIVIDE_UP(width, threads.x), DIVIDE_UP(height, threads.y));
vertical_voting_kernel << <blocks, threads >> >(input_disp, arm_vol, output_disp, width, height);
#ifdef KERN_DEB
SAFE_CALL(cudaDeviceSynchronize(), "Vertical voting failed.");
#endif
}
void median_filter(unsigned short *input_disp, unsigned short *output_disp, int width, int height){
dim3 blocks(width / BLOCK_X, height / BLOCK_Y);
dim3 threads(BLOCK_X, BLOCK_Y);
median_filter_kernel << <blocks, threads >> >(input_disp, output_disp, width, height);
}
|
9b94601a7c3e5b5c91a6dafcaeb89309eecdac1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void x2(float* x3, float x4, int x5) {
int x6 = gridDim.x * blockDim.x;
int x7 = threadIdx.x + blockIdx.x * blockDim.x;
while (x7 < x5) {
x3[x7] = x4;
x7 = x7 + x6;
}
} | 9b94601a7c3e5b5c91a6dafcaeb89309eecdac1a.cu | #include "includes.h"
__global__ void x2(float* x3, float x4, int x5) {
int x6 = gridDim.x * blockDim.x;
int x7 = threadIdx.x + blockIdx.x * blockDim.x;
while (x7 < x5) {
x3[x7] = x4;
x7 = x7 + x6;
}
} |
9dcdc254133be0f53599ae9808920d1c92f1d772.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unordered_map>
#include <vector>
#include <hip/hip_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != hipSuccess ) \
{ printf("Error: %s\n", hipGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
hipEventCreate(&start_);
hipEventCreate(&stop_);
hipEventRecord(start_, 0);
}
~GPUTimer()
{
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
void start()
{
hipEventRecord(start_, 0);
}
float seconds()
{
hipEventRecord(stop_, 0);
hipEventSynchronize(stop_);
float time;
hipEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
hipEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeB;
typedef float floatTypeC;
typedef float floatTypeCompute;
hipDataType typeA = HIP_R_32F;
hipDataType typeC = HIP_R_32F;
cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F;
floatTypeCompute alpha = (floatTypeCompute)1.1f;
floatTypeCompute beta = (floatTypeCompute)0.f;
/**********************
* Computing (partial) reduction : C_{m,v} = alpha * A_{m,h,k,v} + beta * C_{m,v}
*********************/
std::vector<int32_t> modeA{'m','h','k','v'};
std::vector<int32_t> modeC{'m','v'};
int32_t nmodeA = modeA.size();
int32_t nmodeC = modeC.size();
std::unordered_map<int32_t, int64_t> extent;
extent['m'] = 196;
extent['v'] = 64;
extent['h'] = 256;
extent['k'] = 64;
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
/**********************
* Allocating data
*********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n",(sizeA + sizeC)/1024./1024./1024);
void *A_d, *C_d;
HANDLE_CUDA_ERROR(hipMalloc((void**)&A_d, sizeA));
HANDLE_CUDA_ERROR(hipMalloc((void**)&C_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || C == NULL)
{
printf("Error: Host allocation of A, B, or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for (int64_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX - 0.5)*100;
HANDLE_CUDA_ERROR(hipMemcpy(C_d, C, sizeC, hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipMemcpy(A_d, A, sizeA, hipMemcpyHostToDevice));
/*************************
* cuTENSOR
*************************/
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
const cutensorOperator_t opReduce = CUTENSOR_OP_ADD;
/**********************
* Query workspace
**********************/
uint64_t worksize = 0;
HANDLE_ERROR(cutensorReductionGetWorkspaceSize(&handle,
A_d, &descA, modeA.data(),
C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
opReduce, typeCompute, &worksize));
void *work = nullptr;
if (worksize > 0)
{
if (hipSuccess != hipMalloc(&work, worksize))
{
work = nullptr;
worksize = 0;
}
}
/**********************
* Run
**********************/
double minTimeCUTENSOR = 1e100;
cutensorStatus_t err;
for(int i=0; i < 3; ++i)
{
HANDLE_CUDA_ERROR(hipMemcpy(C_d, C, sizeC, hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipDeviceSynchronize());
// Set up timing
GPUTimer timer;
timer.start();
err = cutensorReduction(&handle,
(const void*)&alpha, A_d, &descA, modeA.data(),
(const void*)&beta, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
opReduce, typeCompute, work, worksize, 0 /* stream */);
// Synchronize and measure timing
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
{
printf("ERROR: %s in line %d\n", cutensorGetErrorString(err), __LINE__);
}
minTimeCUTENSOR = (minTimeCUTENSOR < time) ? minTimeCUTENSOR : time;
}
/*************************/
double transferedBytes = sizeC + sizeA;
transferedBytes += ((float) beta != 0.f) ? sizeC : 0;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
if (A) free(A);
if (C) free(C);
if (A_d) hipFree(A_d);
if (C_d) hipFree(C_d);
if (work) hipFree(work);
return 0;
}
| 9dcdc254133be0f53599ae9808920d1c92f1d772.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unordered_map>
#include <vector>
#include <cuda_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != cudaSuccess ) \
{ printf("Error: %s\n", cudaGetErrorString(err)); return err; } \
};
struct GPUTimer
{
GPUTimer()
{
cudaEventCreate(&start_);
cudaEventCreate(&stop_);
cudaEventRecord(start_, 0);
}
~GPUTimer()
{
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
void start()
{
cudaEventRecord(start_, 0);
}
float seconds()
{
cudaEventRecord(stop_, 0);
cudaEventSynchronize(stop_);
float time;
cudaEventElapsedTime(&time, start_, stop_);
return time * 1e-3;
}
private:
cudaEvent_t start_, stop_;
};
int main()
{
typedef float floatTypeA;
typedef float floatTypeB;
typedef float floatTypeC;
typedef float floatTypeCompute;
cudaDataType_t typeA = CUDA_R_32F;
cudaDataType_t typeC = CUDA_R_32F;
cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F;
floatTypeCompute alpha = (floatTypeCompute)1.1f;
floatTypeCompute beta = (floatTypeCompute)0.f;
/**********************
* Computing (partial) reduction : C_{m,v} = alpha * A_{m,h,k,v} + beta * C_{m,v}
*********************/
std::vector<int32_t> modeA{'m','h','k','v'};
std::vector<int32_t> modeC{'m','v'};
int32_t nmodeA = modeA.size();
int32_t nmodeC = modeC.size();
std::unordered_map<int32_t, int64_t> extent;
extent['m'] = 196;
extent['v'] = 64;
extent['h'] = 256;
extent['k'] = 64;
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
/**********************
* Allocating data
*********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n",(sizeA + sizeC)/1024./1024./1024);
void *A_d, *C_d;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&A_d, sizeA));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&C_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || C == NULL)
{
printf("Error: Host allocation of A, B, or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for (int64_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX - 0.5)*100;
HANDLE_CUDA_ERROR(cudaMemcpy(C_d, C, sizeC, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaMemcpy(A_d, A, sizeA, cudaMemcpyHostToDevice));
/*************************
* cuTENSOR
*************************/
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
const cutensorOperator_t opReduce = CUTENSOR_OP_ADD;
/**********************
* Query workspace
**********************/
uint64_t worksize = 0;
HANDLE_ERROR(cutensorReductionGetWorkspaceSize(&handle,
A_d, &descA, modeA.data(),
C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
opReduce, typeCompute, &worksize));
void *work = nullptr;
if (worksize > 0)
{
if (cudaSuccess != cudaMalloc(&work, worksize))
{
work = nullptr;
worksize = 0;
}
}
/**********************
* Run
**********************/
double minTimeCUTENSOR = 1e100;
cutensorStatus_t err;
for(int i=0; i < 3; ++i)
{
HANDLE_CUDA_ERROR(cudaMemcpy(C_d, C, sizeC, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
// Set up timing
GPUTimer timer;
timer.start();
err = cutensorReduction(&handle,
(const void*)&alpha, A_d, &descA, modeA.data(),
(const void*)&beta, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
opReduce, typeCompute, work, worksize, 0 /* stream */);
// Synchronize and measure timing
auto time = timer.seconds();
if (err != CUTENSOR_STATUS_SUCCESS)
{
printf("ERROR: %s in line %d\n", cutensorGetErrorString(err), __LINE__);
}
minTimeCUTENSOR = (minTimeCUTENSOR < time) ? minTimeCUTENSOR : time;
}
/*************************/
double transferedBytes = sizeC + sizeA;
transferedBytes += ((float) beta != 0.f) ? sizeC : 0;
transferedBytes /= 1e9;
printf("cuTensor: %.2f GB/s\n", transferedBytes / minTimeCUTENSOR);
if (A) free(A);
if (C) free(C);
if (A_d) cudaFree(A_d);
if (C_d) cudaFree(C_d);
if (work) cudaFree(work);
return 0;
}
|
bd8d7d3492d2f19baeda6495153c6027e350a87e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SOMClassifier.cpp : Defines the entry point for the console application.
//
#include <fstream>
#include <stdio.h>
#include <tchar.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include "EasyBMP\EasyBMP.h"
#include <time.h>
#include <thrust\extrema.h>
#include <math.h>
#define NUM_NEURONS 1000
struct Neuron{
int X;
int Y;
int Z;
};
Neuron * d_neurons;
Neuron * neurons;
float * weights;
float * d_weights;
float * input_data;
float * d_input_data;
using namespace std;
int numIterations = 1000;
float initLearningRate = .8;
int mapWidth = 25, mapHeight = 25, mapDepth = 25;
//int initialMapRadius = 5;
string int_to_str(int i){
stringstream ss;
ss << i;
string str = ss.str();
return str;
}
float map(float value, float istart, float istop, float ostart, float ostop) {
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart));
}
void readData(float * input_data){
int numSamples = 40;
string subject_id = "";
string face_position_num = "";
string face_num = "";
string final = "";
BMP image;
int count = 0;
int countImage = 0;
for(int i = 1; i<=numSamples; i++){
subject_id = "s"+int_to_str(i);
for(int j = 1; j<=10; j++){
face_position_num = int_to_str(j);
for(int k = 0; k<3; k++){
face_num = int_to_str(k);
final = "..\\images\\"+subject_id+"\\"+face_position_num+"\\"+face_num+".bmp";
image.ReadFromFile(final.c_str());
//face_position is the orientation ID of the face
for(int l = 0; l< 23; l++){
for(int m = 0; m<28; m++){
input_data[countImage] = map((((float)image(l, m)->Red)/255), 0, 1, -1, 1);
countImage++;
}
}
}
}
}
}
double calcDistBetweenNodes(Neuron n1, Neuron n2){
double temp = (double)((n1.X-n2.X)*(n1.X-n2.X)+(n1.Y-n2.Y)*(n1.Y-n2.Y)+(n1.Z-n2.Z)*(n1.Z-n2.Z));
return sqrt(temp);
}
__global__ void findBMU(float * inputVector, float * weights, float * distances){
int i = threadIdx.x+(blockIdx.x*blockDim.x);
if(i<NUM_NEURONS){
int offset = i*644;
int count = 0;
float currentDistance = 0;
for(int w = offset; w<offset+644; w++){
currentDistance += abs((inputVector[count]-weights[w]))*abs((inputVector[count]-weights[w]));
count++;
}
distances[i] = sqrt(currentDistance);
}
}
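// Training schedules used by the helpers below:
// neighbourhood radius sigma(t) = sigma_0 * exp(-t / lambda), with lambda = iterations / ln(sigma_0)
// learning rate alpha(t) = alpha_0 * exp(-t / iterations)
// neighbourhood weight theta(d) = exp(-d^2 / (2 * radius^2))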
float mapRadius(int time){
double initialMapRadius = max(mapWidth, mapHeight)/1.5;
double timeConstant = numIterations/log(initialMapRadius);
double radius = initialMapRadius*exp(-(time/timeConstant));
return radius;
}
double learningRate(int time){
float iterations = (float)numIterations;
double rate = initLearningRate*exp((-(time/iterations)));
return rate;
}
double theta(float distanceBetweenNodes, float radius){
return exp(-(distanceBetweenNodes*distanceBetweenNodes)/(2*radius*radius));
}
double mapRadius(int time, int initialRadius, int newIterations){
double initialMapRadius = (double)initialRadius;
double timeConstant = newIterations/log(initialMapRadius);
double radius = initialMapRadius*exp(-(time/timeConstant));
return radius;
}
double learningRate(int time, double newLearningRate, int newIterations){
float iterations = (float)newIterations;
double rate = newLearningRate*exp((-(time/iterations)));
return rate;
}
__global__ void printWeights(float*weights){
int i = threadIdx.x+(blockIdx.x*blockDim.x);
printf("%f\n", weights[i]);
}
void train(){
Neuron winningNeuron;
int *winningNeuronID;
winningNeuronID = (int*)malloc(sizeof(int));
int subjectNum;
int positionNum;
float neighboorhoodRadius;
float rate;
float distance;
double coeff;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
hipMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
hipMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int winner;
float * winnerID;
for(int y = 0; y<numIterations; y++){
//select a random image
positionNum = rand()%10;
subjectNum = rand()%4;
int count = 0;
for(int i = (subjectNum*30+positionNum*3)*644; i<(subjectNum*30+positionNum*3)*644+644; i++){
data[count] = input_data[i];
count++;
}
hipMemcpy(d_data, data, 644*sizeof(float), hipMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
hipMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, hipMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
neighboorhoodRadius = mapRadius(y);
rate = learningRate(y);
for(int h = 0; h<mapWidth; h++){
for(int i = 0; i<mapHeight; i++){
for(int j = 0; j<mapDepth; j++){
distance = calcDistBetweenNodes(neurons[h*(int)pow((double)mapWidth, 2.0)+i*(mapWidth)+j], neurons[winner]);
if(distance<neighboorhoodRadius){
coeff = theta(distance, neighboorhoodRadius)*rate;
float * newWeight;
newWeight = new float [644];
for (int w = 0; w<644; w++){
double diff = data[w]-weights[((h*100+i*10+j)*644)+w];
newWeight[w] =diff*coeff;
}
for (int w = 0; w<644; w++){
weights[((h*100+i*10+j)*644)+w]+=newWeight[w];
}
delete[] newWeight;
}
}
}
}
hipMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), hipMemcpyHostToDevice);
if(y%10 == 0){
cout << y << endl;
}
}
}
void train(int initialSize, float newLearningRate, float number_of_iterations){
Neuron winningNeuron;
int *winningNeuronID;
winningNeuronID = (int*)malloc(sizeof(int));
int subjectNum;
int positionNum;
float neighboorhoodRadius;
float rate;
float distance;
double coeff;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
hipMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
hipMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int count = 0;
int winner;
float * winnerID;
for(int y = 0; y<number_of_iterations; y++){
//select a random image
positionNum = rand()%10;
subjectNum = rand()%4;
for(int i = (subjectNum*30+positionNum*3)*644; i<(subjectNum*30+positionNum*3)*644+644; i++){
data[count] = input_data[i];
count++;
}
count = 0;
hipMemcpy(d_data, data, 644*sizeof(float), hipMemcpyHostToDevice);
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
hipMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, hipMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
neighboorhoodRadius = mapRadius(y, initialSize,number_of_iterations );
rate = learningRate(y, newLearningRate, number_of_iterations);
for(int h = 0; h<mapWidth; h++){
for(int i = 0; i<mapHeight; i++){
for(int j = 0; j<mapDepth; j++){
distance = calcDistBetweenNodes(neurons[h*100+i*10+j], neurons[winner]);
if(distance<neighboorhoodRadius){
coeff = theta(distance, neighboorhoodRadius)*rate;
float * newWeight;
newWeight = new float [644];
for (int w = 0; w<644; w++){
double diff = data[w]-weights[((h*100+i*10+j)*644)+w];
newWeight[w] =diff*coeff;
}
for (int w = 0; w<644; w++){
weights[((h*100+i*10+j)*644)+w]+=newWeight[w];
}
delete[] newWeight;
}
}
}
}
hipMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), hipMemcpyHostToDevice);
if(y%10 == 0){
cout << y << endl;
}
}
}
void setXYZ(Neuron * neurons){
for(int i = 0; i<10; i++){
for(int j = 0; j<10; j++){
for(int k = 0; k<10; k++){
neurons[i*100+j*10+k].X = i;
neurons[i*100+j*10+k].Y = j;
neurons[i*100+j*10+k].Z = k;
}
}
}
}
//this is done on HOST side because rand() can't be called device side
void setWeights(float * weights){
for(int i = 0; i<NUM_NEURONS*644; i++){
weights[i] = (double)rand()/RAND_MAX;
}
}
void outputImageFromNetwork(float*input_data, int offset, char*outputFileName){
BMP image;
image.SetSize(23, 28);
RGBApixel pixel;
int count = 0;
for(int i = 0; i<23; i++){
for(int j = 0; j<28; j++){
pixel.Red = map(input_data[offset+count], 0, 1, 0, 255);
pixel.Blue = map(input_data[offset+count], 0, 1, 0, 255);
pixel.Green = map(input_data[offset+count], 0, 1, 0, 255);
image.SetPixel(i, j, pixel);
count++;
}
}
image.WriteToFile(outputFileName);
}
int main(int argc, char*argv[])
{
printf("helllooooooo \n");
/*
* SET XYZ FOR THE HOST AND DEVICE NEURONS
*/
//-------------------------------------------------
neurons = (Neuron *)malloc(NUM_NEURONS*sizeof(Neuron)); // allocate memory for host neurons
hipMalloc((void**)&d_neurons, NUM_NEURONS*sizeof(Neuron)); // allocate memory for device neurons
dim3 dim = *(new dim3(10, 10, 10));
setXYZ(neurons); //set XYZ params on HOST side
//hipMemcpy(neurons, d_neurons, NUM_NEURONS*sizeof(Neuron), hipMemcpyDeviceToHost); // copy over to host
//-------------------------------------------------
printf("Stage 1 complete \n");
/*
* Initialize weights
*/
//------------------------------------------------
srand(time(NULL));
weights = (float*)malloc(NUM_NEURONS*644*sizeof(float)); // allocate mem for host weights
hipMalloc(&d_weights, NUM_NEURONS*644*sizeof(float)); // allocate mem for device weights
setWeights(weights); // set weights on the HOST side
hipMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), hipMemcpyHostToDevice); // copy over to device
//-------------------------------------------------
printf("Stage 2 complete \n");
/*
* Read data from file
*/
//-------------------------------------------------
input_data = (float *) malloc(1200*644*sizeof(float)); //allocate mem for host image data
hipMalloc(&d_input_data, 1200*644*sizeof(float)); //allocate mem for device image data
readData(input_data); // read data with host array
hipMemcpy(d_input_data, input_data, 1200*644*sizeof(float), hipMemcpyHostToDevice); //copy to device
//-------------------------------------------------
printf("Training started \n");
train();
train(2, .02, 500);
hipMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), hipMemcpyHostToDevice);
int count = 0;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
hipMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
hipMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int winner;
float *winnerID;
ofstream file;
file.open("data.txt");
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
hipMemcpy(d_data, data, 644*sizeof(float), hipMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
hipMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, hipMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].X<<(i==117 ? "" : ",");
}
file << endl;
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
hipMemcpy(d_data, data, 644*sizeof(float), hipMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
hipMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, hipMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].Y<<(i==117 ? "" : ",");
}
file << endl;
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
hipMemcpy(d_data, data, 644*sizeof(float), hipMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
hipMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, hipMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].Z<<(i==117 ? "" : ",");
}
file.close();
outputImageFromNetwork(weights, 0, "OutputFileFromNetworkWeights.bmp");
}
/*
float * test_vector;
test_vector = (float*)malloc(644*sizeof(float));
for(int i = 0; i<644; i++){
test_vector[i] = .32f;
}
float * d_test_vector;
hipMalloc(&d_test_vector, 644*sizeof(float));
hipMemcpy(d_test_vector, test_vector, 644*sizeof(float), hipMemcpyHostToDevice);
float * d_least;
float * least;
least = (float*)malloc(sizeof(float));
*least = 9999999;
hipMalloc(&d_least, sizeof(float));
hipMemcpy(d_least, least, sizeof(float), hipMemcpyHostToDevice);
int * d_winner;
int * winner;
winner = (int*)malloc(sizeof(int));
*winner = 0;
hipMalloc(&d_winner, sizeof(int));
hipMemcpy(d_winner, winner, sizeof(int), hipMemcpyHostToDevice);
*/ | bd8d7d3492d2f19baeda6495153c6027e350a87e.cu | // SOMClassifier.cpp : Defines the entry point for the console application.
//
#include <fstream>
#include <stdio.h>
#include <tchar.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include "EasyBMP\EasyBMP.h"
#include <time.h>
#include <thrust\extrema.h>
#include <math.h>
#define NUM_NEURONS 1000
struct Neuron{
int X;
int Y;
int Z;
};
Neuron * d_neurons;
Neuron * neurons;
float * weights;
float * d_weights;
float * input_data;
float * d_input_data;
using namespace std;
int numIterations = 1000;
float initLearningRate = .8;
int mapWidth = 25, mapHeight = 25, mapDepth = 25;
//int initialMapRadius = 5;
string int_to_str(int i){
stringstream ss;
ss << i;
string str = ss.str();
return str;
}
float map(float value, float istart, float istop, float ostart, float ostop) {
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart));
}
void readData(float * input_data){
int numSamples = 40;
string subject_id = "";
string face_position_num = "";
string face_num = "";
string final = "";
BMP image;
int count = 0;
int countImage = 0;
for(int i = 1; i<=numSamples; i++){
subject_id = "s"+int_to_str(i);
for(int j = 1; j<=10; j++){
face_position_num = int_to_str(j);
for(int k = 0; k<3; k++){
face_num = int_to_str(k);
final = "..\\images\\"+subject_id+"\\"+face_position_num+"\\"+face_num+".bmp";
image.ReadFromFile(final.c_str());
//face_position is the orientation ID of the face
for(int l = 0; l< 23; l++){
for(int m = 0; m<28; m++){
input_data[countImage] = map((((float)image(l, m)->Red)/255), 0, 1, -1, 1);
countImage++;
}
}
}
}
}
}
double calcDistBetweenNodes(Neuron n1, Neuron n2){
double temp = (double)((n1.X-n2.X)*(n1.X-n2.X)+(n1.Y-n2.Y)*(n1.Y-n2.Y)+(n1.Z-n2.Z)*(n1.Z-n2.Z));
return sqrt(temp);
}
__global__ void findBMU(float * inputVector, float * weights, float * distances){
int i = threadIdx.x+(blockIdx.x*blockDim.x);
if(i<NUM_NEURONS){
int offset = i*644;
int count = 0;
float currentDistance = 0;
for(int w = offset; w<offset+644; w++){
currentDistance += abs((inputVector[count]-weights[w]))*abs((inputVector[count]-weights[w]));
count++;
}
distances[i] = sqrt(currentDistance);
}
}
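// Training schedules used by the helpers below:
// neighbourhood radius sigma(t) = sigma_0 * exp(-t / lambda), with lambda = iterations / ln(sigma_0)
// learning rate alpha(t) = alpha_0 * exp(-t / iterations)
// neighbourhood weight theta(d) = exp(-d^2 / (2 * radius^2))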
float mapRadius(int time){
double initialMapRadius = max(mapWidth, mapHeight)/1.5;
double timeConstant = numIterations/log(initialMapRadius);
double radius = initialMapRadius*exp(-(time/timeConstant));
return radius;
}
double learningRate(int time){
float iterations = (float)numIterations;
double rate = initLearningRate*exp((-(time/iterations)));
return rate;
}
double theta(float distanceBetweenNodes, float radius){
return exp(-(distanceBetweenNodes*distanceBetweenNodes)/(2*radius*radius));
}
double mapRadius(int time, int initialRadius, int newIterations){
double initialMapRadius = (double)initialRadius;
double timeConstant = newIterations/log(initialMapRadius);
double radius = initialMapRadius*exp(-(time/timeConstant));
return radius;
}
double learningRate(int time, double newLearningRate, int newIterations){
float iterations = (float)newIterations;
double rate = newLearningRate*exp((-(time/iterations)));
return rate;
}
__global__ void printWeights(float*weights){
int i = threadIdx.x+(blockIdx.x*blockDim.x);
printf("%f\n", weights[i]);
}
void train(){
Neuron winningNeuron;
int *winningNeuronID;
winningNeuronID = (int*)malloc(sizeof(int));
int subjectNum;
int positionNum;
float neighboorhoodRadius;
float rate;
float distance;
double coeff;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
cudaMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
cudaMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int winner;
float * winnerID;
for(int y = 0; y<numIterations; y++){
//select a random image
positionNum = rand()%10;
subjectNum = rand()%4;
int count = 0;
for(int i = (subjectNum*30+positionNum*3)*644; i<(subjectNum*30+positionNum*3)*644+644; i++){
data[count] = input_data[i];
count++;
}
cudaMemcpy(d_data, data, 644*sizeof(float), cudaMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
cudaMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, cudaMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
neighboorhoodRadius = mapRadius(y);
rate = learningRate(y);
for(int h = 0; h<mapWidth; h++){
for(int i = 0; i<mapHeight; i++){
for(int j = 0; j<mapDepth; j++){
distance = calcDistBetweenNodes(neurons[h*(int)pow((double)mapWidth, 2.0)+i*(mapWidth)+j], neurons[winner]);
if(distance<neighboorhoodRadius){
coeff = theta(distance, neighboorhoodRadius)*rate;
float * newWeight;
newWeight = new float [644];
for (int w = 0; w<644; w++){
double diff = data[w]-weights[((h*100+i*10+j)*644)+w];
newWeight[w] =diff*coeff;
}
for (int w = 0; w<644; w++){
weights[((h*100+i*10+j)*644)+w]+=newWeight[w];
}
delete[] newWeight;
}
}
}
}
cudaMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), cudaMemcpyHostToDevice);
if(y%10 == 0){
cout << y << endl;
}
}
}
void train(int initialSize, float newLearningRate, float number_of_iterations){
Neuron winningNeuron;
int *winningNeuronID;
winningNeuronID = (int*)malloc(sizeof(int));
int subjectNum;
int positionNum;
float neighboorhoodRadius;
float rate;
float distance;
double coeff;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
cudaMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
cudaMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int count = 0;
int winner;
float * winnerID;
for(int y = 0; y<number_of_iterations; y++){
//select a random image
positionNum = rand()%10;
subjectNum = rand()%4;
for(int i = (subjectNum*30+positionNum*3)*644; i<(subjectNum*30+positionNum*3)*644+644; i++){
data[count] = input_data[i];
count++;
}
count = 0;
cudaMemcpy(d_data, data, 644*sizeof(float), cudaMemcpyHostToDevice);
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
cudaMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, cudaMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
neighboorhoodRadius = mapRadius(y, initialSize,number_of_iterations );
rate = learningRate(y, newLearningRate, number_of_iterations);
for(int h = 0; h<mapWidth; h++){
for(int i = 0; i<mapHeight; i++){
for(int j = 0; j<mapDepth; j++){
distance = calcDistBetweenNodes(neurons[h*100+i*10+j], neurons[winner]);
if(distance<neighboorhoodRadius){
coeff = theta(distance, neighboorhoodRadius)*rate;
float * newWeight;
newWeight = new float [644];
for (int w = 0; w<644; w++){
double diff = data[w]-weights[((h*100+i*10+j)*644)+w];
newWeight[w] =diff*coeff;
}
for (int w = 0; w<644; w++){
weights[((h*100+i*10+j)*644)+w]+=newWeight[w];
}
delete[] newWeight;
}
}
}
}
cudaMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), cudaMemcpyHostToDevice);
if(y%10 == 0){
cout << y << endl;
}
}
}
void setXYZ(Neuron * neurons){
for(int i = 0; i<10; i++){
for(int j = 0; j<10; j++){
for(int k = 0; k<10; k++){
neurons[i*100+j*10+k].X = i;
neurons[i*100+j*10+k].Y = j;
neurons[i*100+j*10+k].Z = k;
}
}
}
}
//this is done on HOST side because rand() can't be called device side
void setWeights(float * weights){
for(int i = 0; i<NUM_NEURONS*644; i++){
weights[i] = (double)rand()/RAND_MAX;
}
}
void outputImageFromNetwork(float*input_data, int offset, char*outputFileName){
BMP image;
image.SetSize(23, 28);
RGBApixel pixel;
int count = 0;
for(int i = 0; i<23; i++){
for(int j = 0; j<28; j++){
pixel.Red = map(input_data[offset+count], 0, 1, 0, 255);
pixel.Blue = map(input_data[offset+count], 0, 1, 0, 255);
pixel.Green = map(input_data[offset+count], 0, 1, 0, 255);
image.SetPixel(i, j, pixel);
count++;
}
}
image.WriteToFile(outputFileName);
}
int main(int argc, char*argv[])
{
printf("helllooooooo \n");
/*
* SET XYZ FOR THE HOST AND DEVICE NEURONS
*/
//-------------------------------------------------
neurons = (Neuron *)malloc(NUM_NEURONS*sizeof(Neuron)); // allocate memory for host neurons
cudaMalloc((void**)&d_neurons, NUM_NEURONS*sizeof(Neuron)); // allocate memory for device neurons
dim3 dim = *(new dim3(10, 10, 10));
setXYZ(neurons); //set XYZ params on HOST side
//cudaMemcpy(neurons, d_neurons, NUM_NEURONS*sizeof(Neuron), cudaMemcpyDeviceToHost); // copy over to host
//-------------------------------------------------
printf("Stage 1 complete \n");
/*
* Initialize weights
*/
//------------------------------------------------
srand(time(NULL));
weights = (float*)malloc(NUM_NEURONS*644*sizeof(float)); // allocate mem for host weights
cudaMalloc(&d_weights, NUM_NEURONS*644*sizeof(float)); // allocate mem for device weights
setWeights(weights); // set weights on the HOST side
cudaMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), cudaMemcpyHostToDevice); // copy over to device
//-------------------------------------------------
printf("Stage 2 complete \n");
/*
* Read data from file
*/
//-------------------------------------------------
input_data = (float *) malloc(1200*644*sizeof(float)); //allocate mem for host image data
cudaMalloc(&d_input_data, 1200*644*sizeof(float)); //allocate mem for device image data
readData(input_data); // read data with host array
cudaMemcpy(d_input_data, input_data, 1200*644*sizeof(float), cudaMemcpyHostToDevice); //copy to device
//-------------------------------------------------
printf("Training started \n");
train();
train(2, .02, 500);
cudaMemcpy(d_weights, weights, NUM_NEURONS*644*sizeof(float), cudaMemcpyHostToDevice);
int count = 0;
float * data;
data = (float*)malloc(sizeof(float)*644);
float * d_data;
cudaMalloc(&d_data, sizeof(float)*644);
float * distances;
distances = (float*)malloc(sizeof(float)*NUM_NEURONS);
float * d_distances;
cudaMalloc(&d_distances, sizeof(float)*NUM_NEURONS);
int winner;
float *winnerID;
ofstream file;
file.open("data.txt");
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
cudaMemcpy(d_data, data, 644*sizeof(float), cudaMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
cudaMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, cudaMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].X<<(i==117 ? "" : ",");
}
file << endl;
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
cudaMemcpy(d_data, data, 644*sizeof(float), cudaMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
cudaMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, cudaMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].Y<<(i==117 ? "" : ",");
}
file << endl;
for(int i = 0; i<120; i+=3){
for(int j = i*644; j<i*644+644; j++){
data[count] = input_data[j];
count++;
}
cudaMemcpy(d_data, data, 644*sizeof(float), cudaMemcpyHostToDevice);
count = 0;
findBMU<<<20, 50>>>(d_data, d_weights, d_distances);
cudaMemcpy(distances, d_distances, sizeof(float)*NUM_NEURONS, cudaMemcpyDeviceToHost);
winnerID = thrust::min_element(distances, distances+NUM_NEURONS);
winner = winnerID-distances;
file << neurons[winner].Z<<(i==117 ? "" : ",");
}
file.close();
outputImageFromNetwork(weights, 0, "OutputFileFromNetworkWeights.bmp");
}
/*
float * test_vector;
test_vector = (float*)malloc(644*sizeof(float));
for(int i = 0; i<644; i++){
test_vector[i] = .32f;
}
float * d_test_vector;
cudaMalloc(&d_test_vector, 644*sizeof(float));
cudaMemcpy(d_test_vector, test_vector, 644*sizeof(float), cudaMemcpyHostToDevice);
float * d_least;
float * least;
least = (float*)malloc(sizeof(float));
*least = 9999999;
cudaMalloc(&d_least, sizeof(float));
cudaMemcpy(d_least, least, sizeof(float), cudaMemcpyHostToDevice);
int * d_winner;
int * winner;
winner = (int*)malloc(sizeof(int));
*winner = 0;
cudaMalloc(&d_winner, sizeof(int));
cudaMemcpy(d_winner, winner, sizeof(int), cudaMemcpyHostToDevice);
*/ |
b11b10c79a7e29823a12b3885e353d987769b9a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
* MNeumann (April 2010): Removed shrUtil dependency and added external declarations
* to enable usage for MNRT.
*
*/
//#include <shrUtils.h>
#include <stdlib.h>
#include <stdio.h>
#include "cuda_utils.h"
#include "MersenneTwister.h"
//#include "MNCudaUtil.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
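// (same seeding recurrence as the reference Mersenne Twister init_genrand,
// multiplier 1812433253, truncated to MT_WMASK bits)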
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" bool MersenneTwisterGPUInit(const char *fname)
///
/// \brief Loads Mersenne Twister configuration from given source file.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param fname Filename of the configuration file.
///
/// \return true if it succeeds, false if it fails.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
bool MersenneTwisterGPUInit(const char *fname)
{
FILE *fd = fopen(fname, "rb");
if(!fd)
{
//MNFatal("Failed to open %s for Mersenne Twister configuration.", fname);
return false;
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) )
{
//MNFatal("Failed to load %s for Mersenne Twister configuration.", fname);
return false;
}
fclose(fd);
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPUSeed(unsigned int seed)
///
/// \brief Seeds Mersenne Twister for current GPU context.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param seed The seed.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPUSeed(unsigned int seed)
{
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
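// Copy the freshly seeded per-generator configurations to the device-side
// ds_MT symbol that RandomGPU reads.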
hipError_t err = hipMemcpyToSymbol(ds_MT, MT, sizeof(h_MT));
assert(err == hipSuccess);
free(MT);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
///
/// \brief Performs Mersenne Twister RNG to generate a predefined number of uniform random
/// numbers to use in other kernels.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param [out] d_outRand The generated uniform random numbers.
/// \param nPerRNG The random numbers per generator. Will generate
/// nPerRNG * MT_RNG_COUNT numbers.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
{
// 32 * 128 = MT_RNG_COUNT = 4096. See SDK 3.0 sample.
hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_outRand, nPerRNG);
// MNCUDA_CHECKERROR;
// CUDA_CHECK_ERROR;
//BoxMullerGPU<<<32, 128>>>(d_outRand, nPerRNG);
} | b11b10c79a7e29823a12b3885e353d987769b9a0.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
* MNeumann (April 2010): Removed shrUtil dependency and added external declarations
* to enable usage for MNRT.
*
*/
//#include <shrUtils.h>
#include <stdlib.h>
#include <stdio.h>
#include "cuda_utils.h"
#include "MersenneTwister.h"
//#include "MNCudaUtil.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ inline void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" bool MersenneTwisterGPUInit(const char *fname)
///
/// \brief Loads Mersenne Twister configuration from given source file.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param fname Filename of the configuration file.
///
/// \return true if it succeeds, false if it fails.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
bool MersenneTwisterGPUInit(const char *fname)
{
FILE *fd = fopen(fname, "rb");
if(!fd)
{
//MNFatal("Failed to open %s for Mersenne Twister configuration.", fname);
return false;
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) )
{
//MNFatal("Failed to load %s for Mersenne Twister configuration.", fname);
return false;
}
fclose(fd);
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPUSeed(unsigned int seed)
///
/// \brief Seeds Mersenne Twister for current GPU context.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param seed The seed.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPUSeed(unsigned int seed)
{
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
cudaError_t err = cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT));
assert(err == cudaSuccess);
free(MT);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
///
/// \brief Performs Mersenne Twister RNG to generate a predefined number of uniform random
/// numbers to use in other kernels.
///
/// \author Mathias Neumann
/// \date 11.04.2010
///
/// \param [out] d_outRand The generated uniform random numbers.
/// \param nPerRNG The random numbers per generator. Will generate
/// nPerRNG * MT_RNG_COUNT numbers.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void MersenneTwisterGPU(float* d_outRand, int nPerRNG)
{
// 32 * 128 = MT_RNG_COUNT = 4096. See SDK 3.0 sample.
RandomGPU<<<32, 128>>>(d_outRand, nPerRNG);
// MNCUDA_CHECKERROR;
// CUDA_CHECK_ERROR;
//BoxMullerGPU<<<32, 128>>>(d_outRand, nPerRNG);
} |
75690927550376ce371a43b9c3a7ccbee28c66df.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the
rest is the same as normal convolution.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
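// With a 7x7 input, 3x3 filters, padding 1 and stride 1 the spatial size is
// unchanged, so the output tensor has extent {1, 7, 7, 512}; the bias vector
// created below holds one element per output channel (K = 512).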
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
// tensor C is treated as the bias vector. We can enable the CONV
// to project away the N, H, W dimension by setting the stride to zero.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 75690927550376ce371a43b9c3a7ccbee28c66df.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the
rest is the same as normal convolution.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
// tensor C is treated as the bias vector. We can enable the CONV
// to project away the N, H, W dimension by setting the stride to zero.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
38f765abd0e738395b4c322f7cf14e13447c99f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlarfg-v2.cu normal z -> s, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha);
float alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else {
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
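// This computes the 2-norm of the n-1 entries below alpha (dx0+1 onward) into
// dxnorm[0]; the kernel then combines it with alpha to form beta and the
// scaling of v.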
hipLaunchKernelGGL(( magma_slarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
}
| 38f765abd0e738395b4c322f7cf14e13447c99f4.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlarfg-v2.cu normal z -> s, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha);
float alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else {
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_slarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
9521fcabbc454dabe238c2162a5ee0bbfbe54213.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/error.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> get_two_hop_neighbors(
legacy::GraphCSRView<VT, ET, WT> const& graph)
{
hipStream_t stream{nullptr};
rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
ET* d_exsum_degree = exsum_degree.data().get();
// Find the degree of the out vertex of each edge
degree_iterator<ET> deg_it(graph.offsets);
deref_functor<degree_iterator<ET>, ET> deref(deg_it);
exsum_degree[0] = ET{0};
thrust::transform(rmm::exec_policy(stream),
graph.indices,
graph.indices + graph.number_of_edges,
d_exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(rmm::exec_policy(stream),
d_exsum_degree + 1,
d_exsum_degree + graph.number_of_edges + 1,
d_exsum_degree + 1);
// Copy out the last value to get the size of scattered output
ET output_size = exsum_degree[graph.number_of_edges];
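// output_size is the number of candidate two-hop pairs before deduplication:
// each edge (u, v) contributes one pair per out-neighbour of v, so the total
// is the sum of the destination vertices' out-degrees.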
// Allocate memory for the scattered output
rmm::device_vector<VT> first_pair(output_size);
rmm::device_vector<VT> second_pair(output_size);
VT* d_first_pair = first_pair.data().get();
VT* d_second_pair = second_pair.data().get();
// Figure out number of blocks and allocate memory for block bucket offsets
ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
ET* d_block_bucket_offsets = block_bucket_offsets.data().get();
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
hipLaunchKernelGGL(( compute_bucket_offsets_kernel), dim3(grid), dim3(block), 0, nullptr,
d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);
block_bucket_offsets[num_blocks] = graph.number_of_edges;
// Scatter the expanded edge lists into temp space
grid.x = min((ET)MAXBLOCKS, num_blocks);
hipLaunchKernelGGL(( scatter_expand_kernel), dim3(grid), dim3(block), 0, nullptr, d_exsum_degree,
graph.indices,
graph.offsets,
d_block_bucket_offsets,
graph.number_of_vertices,
output_size,
num_blocks,
d_first_pair,
d_second_pair);
// TODO: This would be faster in a hash table (no sorting), unless there's
// some reason that the result has to be sorted
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(rmm::exec_policy(stream), tuple_start, tuple_end);
tuple_end = thrust::copy_if(
rmm::exec_policy(stream), tuple_start, tuple_end, tuple_start, self_loop_flagger<VT>());
tuple_end = thrust::unique(rmm::exec_policy(stream), tuple_start, tuple_end);
// Get things ready to return
ET outputSize = tuple_end - tuple_start;
auto result =
std::make_unique<legacy::GraphCOO<VT, ET, WT>>(graph.number_of_vertices, outputSize, false);
hipMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, hipMemcpyDefault);
hipMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, hipMemcpyDefault);
return result;
}
template std::unique_ptr<legacy::GraphCOO<int, int, float>> get_two_hop_neighbors(
legacy::GraphCSRView<int, int, float> const&);
template std::unique_ptr<legacy::GraphCOO<int, int, double>> get_two_hop_neighbors(
legacy::GraphCSRView<int, int, double> const&);
} // namespace cugraph
| 9521fcabbc454dabe238c2162a5ee0bbfbe54213.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/error.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> get_two_hop_neighbors(
legacy::GraphCSRView<VT, ET, WT> const& graph)
{
cudaStream_t stream{nullptr};
rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
ET* d_exsum_degree = exsum_degree.data().get();
// Find the degree of the out vertex of each edge
degree_iterator<ET> deg_it(graph.offsets);
deref_functor<degree_iterator<ET>, ET> deref(deg_it);
exsum_degree[0] = ET{0};
thrust::transform(rmm::exec_policy(stream),
graph.indices,
graph.indices + graph.number_of_edges,
d_exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(rmm::exec_policy(stream),
d_exsum_degree + 1,
d_exsum_degree + graph.number_of_edges + 1,
d_exsum_degree + 1);
// Copy out the last value to get the size of scattered output
ET output_size = exsum_degree[graph.number_of_edges];
// Allocate memory for the scattered output
rmm::device_vector<VT> first_pair(output_size);
rmm::device_vector<VT> second_pair(output_size);
VT* d_first_pair = first_pair.data().get();
VT* d_second_pair = second_pair.data().get();
// Figure out number of blocks and allocate memory for block bucket offsets
ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
ET* d_block_bucket_offsets = block_bucket_offsets.data().get();
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
compute_bucket_offsets_kernel<<<grid, block, 0, nullptr>>>(
d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);
block_bucket_offsets[num_blocks] = graph.number_of_edges;
// Scatter the expanded edge lists into temp space
grid.x = min((ET)MAXBLOCKS, num_blocks);
scatter_expand_kernel<<<grid, block, 0, nullptr>>>(d_exsum_degree,
graph.indices,
graph.offsets,
d_block_bucket_offsets,
graph.number_of_vertices,
output_size,
num_blocks,
d_first_pair,
d_second_pair);
// TODO: This would be faster in a hash table (no sorting), unless there's
// some reason that the result has to be sorted
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(rmm::exec_policy(stream), tuple_start, tuple_end);
tuple_end = thrust::copy_if(
rmm::exec_policy(stream), tuple_start, tuple_end, tuple_start, self_loop_flagger<VT>());
tuple_end = thrust::unique(rmm::exec_policy(stream), tuple_start, tuple_end);
// Get things ready to return
ET outputSize = tuple_end - tuple_start;
auto result =
std::make_unique<legacy::GraphCOO<VT, ET, WT>>(graph.number_of_vertices, outputSize, false);
cudaMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
cudaMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
return result;
}
template std::unique_ptr<legacy::GraphCOO<int, int, float>> get_two_hop_neighbors(
legacy::GraphCSRView<int, int, float> const&);
template std::unique_ptr<legacy::GraphCOO<int, int, double>> get_two_hop_neighbors(
legacy::GraphCSRView<int, int, double> const&);
} // namespace cugraph
|
3576400aeae9a389a68be266159caceb5ed821ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#define BLOCK_SIZE 32
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
#define CUDA_CALL_SAFE(f) \
do \
{ \
hipError_t _cuda_error = f; \
if (_cuda_error != hipSuccess) \
{ \
fprintf(stderr, \
"%s, %d, CUDA ERROR: %s %s\n", \
__FILE__, \
__LINE__, \
hipGetErrorName(_cuda_error), \
hipGetErrorString(_cuda_error) \
); \
abort(); \
exit(EXIT_FAILURE); \
} \
} while (0)
static inline double time_diff(struct timeval tv_start, struct timeval tv_end)
{
return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0;
}
/*void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}*/
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
FILE *fp;
if ((fp = fopen(file, "wb")) == 0)
{
fprintf(stderr, "The file was not opened\n");
abort();
exit(EXIT_FAILURE);
}
if (fwrite((char *)vect, sizeof(float) * grid_rows * grid_cols, 1, fp) != 1)
{
fprintf(stderr, "The file was not written\n");
abort();
exit(EXIT_FAILURE);
}
fsync(fileno(fp));
fclose(fp);
}
/*void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}*/
void readinput(float *vect, int grid_rows, int grid_cols, char *file)
{
FILE *fp;
if((fp = fopen(file, "rb")) == 0)
{
fprintf(stderr, "The file was not opened\n");
abort();
exit(EXIT_FAILURE);
}
if (fread((char *)vect, sizeof(float) * grid_rows * grid_cols, 1, fp) != 1)
{
fprintf(stderr, "The file was not read\n");
abort();
exit(EXIT_FAILURE);
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(long iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
long grid_cols, //Col of grid
long grid_rows, //Row of grid
long border_cols, // border offset
long border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed)
{
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
long bx = blockIdx.x;
long by = blockIdx.y;
long tx = threadIdx.x;
long ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
long small_block_rows = BLOCK_SIZE - iteration * 2;//EXPAND_RATE
long small_block_cols = BLOCK_SIZE - iteration * 2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
long blkY = small_block_rows * by - border_rows;
long blkX = small_block_cols * bx - border_cols;
long blkYmax = blkY + BLOCK_SIZE - 1;
long blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
long yidx = blkY + ty;
long xidx = blkX + tx;
// load data if it is within the valid input range
long loadYidx = yidx, loadXidx = xidx;
long index = grid_cols * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) && IN_RANGE(loadXidx, 0, grid_cols - 1))
{
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index]; // Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
long validYmin = (blkY < 0) ? -blkY : 0;
long validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
long validXmin = (blkX < 0) ? -blkX : 0;
long validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
long N = ty-1;
long S = ty+1;
long W = tx-1;
long E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (long i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
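// Explicit finite-difference update: new T = T + (step/Cap) * (dissipated
// power + heat exchanged with the N/S and E/W neighbours through Ry/Rx
// + heat exchanged with the ambient through Rz).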
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], long col, long row, \
long total_iterations, long num_iterations, long blockCols, long blockRows, long borderCols, long borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
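// Cap approximates each grid cell's heat capacitance and Rx/Ry/Rz its lateral
// and vertical thermal resistances; step is sized so that even at the maximum
// power density (MAX_PD) one step changes a cell by at most PRECISION degrees.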
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
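// Ping-pong between the two temperature buffers: src and dst are swapped at
// the top of every outer iteration, so the first launch reads MatrixTemp[0]
// (loaded with the initial temperatures) and writes MatrixTemp[1].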
for (t = 0; t < total_iterations; t += num_iterations)
{
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock) , 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower, MatrixTemp[src], MatrixTemp[dst], \
col, row, borderCols, borderRows, Cap, Rx, Ry, Rz, step, time_elapsed);
}
CUDA_CALL_SAFE(hipDeviceSynchronize());
return dst;
}
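/* Example of the ping-pong schedule above, using hypothetical values total_iterations = 5 and
num_iterations = 2 (illustration only):
t = 0: src = 0, dst = 1, kernel advances MIN(2, 5) = 2 time steps
t = 2: src = 1, dst = 0, kernel advances MIN(2, 3) = 2 time steps
t = 4: src = 0, dst = 1, kernel advances MIN(2, 1) = 1 time step
The returned dst (here 1) identifies the MatrixTemp buffer holding the latest temperatures. */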
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char **argv)
{
size_t size;
long grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
long total_iterations = 60;
long pyramid_height = 1; // number of iterations
struct timeval tv_start, tv_end;
double kernel_time = 0; // in ms
double writefile_time = 0; // in ms
double readfile_time = 0; // in ms
double d2h_memcpy_time = 0; // in ms
double h2d_memcpy_time = 0; // in ms
if (argc != 7)
usage(argc, argv);
if((grid_rows = atol(argv[1]))<=0||
(grid_cols = atol(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile = argv[4];
pfile = argv[5];
ofile = argv[6];
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 cells along each border
long borderCols = (pyramid_height)*EXPAND_RATE/2;
long borderRows = (pyramid_height)*EXPAND_RATE/2;
long smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
long smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
long blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
long blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
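/* Worked example of the pyramid parameters above (hypothetical sizes, for illustration only):
with BLOCK_SIZE = 32 and pyramid_height = 2, each launch computes 2 time steps, so
borderCols = borderRows = 2*2/2 = 2, smallBlockCol = smallBlockRow = 32 - 4 = 28,
and a 1024x1024 grid needs blockCols = blockRows = ceil(1024/28) = 37 thread blocks. */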
FilesavingTemp = (float *)malloc(size * sizeof(float));
FilesavingPower = (float *)malloc(size * sizeof(float));
MatrixOut = (float *)calloc(size, sizeof(float));
if(!FilesavingPower || !FilesavingTemp || !MatrixOut)
{
fprintf(stderr, "unable to allocate memory\n");
abort();
exit(EXIT_FAILURE);
}
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
gettimeofday(&tv_start, NULL);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
gettimeofday(&tv_end, NULL);
readfile_time += time_diff(tv_start, tv_end);
float *MatrixTemp[2], *MatrixPower;
CUDA_CALL_SAFE(hipMalloc((void **)&MatrixTemp[0], sizeof(float) * size));
CUDA_CALL_SAFE(hipMalloc((void **)&MatrixTemp[1], sizeof(float) * size));
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size, hipMemcpyHostToDevice));
gettimeofday(&tv_end, NULL);
h2d_memcpy_time += time_diff(tv_start, tv_end);
CUDA_CALL_SAFE(hipMalloc((void **)&MatrixPower, sizeof(float) * size));
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(hipMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size, hipMemcpyHostToDevice));
gettimeofday(&tv_end, NULL);
h2d_memcpy_time += time_diff(tv_start, tv_end);
printf("Start computing the transient temperature\n");
gettimeofday(&tv_start, NULL);
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
gettimeofday(&tv_end, NULL);
kernel_time += time_diff(tv_start, tv_end);
printf("Ending simulation\n");
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size, hipMemcpyDeviceToHost));
gettimeofday(&tv_end, NULL);
d2h_memcpy_time += time_diff(tv_start, tv_end);
gettimeofday(&tv_start, NULL);
writeoutput(MatrixOut, grid_rows, grid_cols, ofile);
gettimeofday(&tv_end, NULL);
writefile_time += time_diff(tv_start, tv_end);
CUDA_CALL_SAFE(hipFree(MatrixPower));
CUDA_CALL_SAFE(hipFree(MatrixTemp[0]));
CUDA_CALL_SAFE(hipFree(MatrixTemp[1]));
free(MatrixOut);
printf("==> header: kernel_time (ms),writefile_time (ms),d2h_memcpy_time (ms),readfile_time (ms),h2d_memcpy_time (ms)\n");
printf("==> data: %f,%f,%f,%f,%f\n", kernel_time, writefile_time, d2h_memcpy_time, readfile_time, h2d_memcpy_time);
}
| 3576400aeae9a389a68be266159caceb5ed821ba.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#define BLOCK_SIZE 32
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
#define CUDA_CALL_SAFE(f) \
do \
{ \
cudaError_t _cuda_error = f; \
if (_cuda_error != cudaSuccess) \
{ \
fprintf(stderr, \
"%s, %d, CUDA ERROR: %s %s\n", \
__FILE__, \
__LINE__, \
cudaGetErrorName(_cuda_error), \
cudaGetErrorString(_cuda_error) \
); \
abort(); \
exit(EXIT_FAILURE); \
} \
} while (0)
static inline double time_diff(struct timeval tv_start, struct timeval tv_end)
{
return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0;
}
/*void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}*/
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file)
{
FILE *fp;
if ((fp = fopen(file, "wb")) == 0)
{
fprintf(stderr, "The file was not opened\n");
abort();
exit(EXIT_FAILURE);
}
if (fwrite((char *)vect, sizeof(float) * grid_rows * grid_cols, 1, fp) != 1)
{
fprintf(stderr, "The file was not written\n");
abort();
exit(EXIT_FAILURE);
}
fsync(fileno(fp));
fclose(fp);
}
/*void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}*/
void readinput(float *vect, int grid_rows, int grid_cols, char *file)
{
FILE *fp;
if((fp = fopen(file, "rb")) == 0)
{
fprintf(stderr, "The file was not opened\n");
abort();
exit(EXIT_FAILURE);
}
if (fread((char *)vect, sizeof(float) * grid_rows * grid_cols, 1, fp) != 1)
{
fprintf(stderr, "The file was not read\n");
abort();
exit(EXIT_FAILURE);
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(long iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
long grid_cols, //Col of grid
long grid_rows, //Row of grid
long border_cols, // border offset
long border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed)
{
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
long bx = blockIdx.x;
long by = blockIdx.y;
long tx = threadIdx.x;
long ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
long small_block_rows = BLOCK_SIZE - iteration * 2;//EXPAND_RATE
long small_block_cols = BLOCK_SIZE - iteration * 2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
long blkY = small_block_rows * by - border_rows;
long blkX = small_block_cols * bx - border_cols;
long blkYmax = blkY + BLOCK_SIZE - 1;
long blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
long yidx = blkY + ty;
long xidx = blkX + tx;
// load data if it is within the valid input range
long loadYidx = yidx, loadXidx = xidx;
long index = grid_cols * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) && IN_RANGE(loadXidx, 0, grid_cols - 1))
{
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index]; // Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
long validYmin = (blkY < 0) ? -blkY : 0;
long validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
long validXmin = (blkX < 0) ? -blkX : 0;
long validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
long N = ty-1;
long S = ty+1;
long W = tx-1;
long E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (long i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], long col, long row, \
long total_iterations, long num_iterations, long blockCols, long blockRows, long borderCols, long borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t += num_iterations)
{
int temp = src;
src = dst;
dst = temp;
calculate_temp<<< dimGrid, dimBlock >>>(MIN(num_iterations, total_iterations-t), MatrixPower, MatrixTemp[src], MatrixTemp[dst], \
col, row, borderCols, borderRows, Cap, Rx, Ry, Rz, step, time_elapsed);
}
CUDA_CALL_SAFE(cudaDeviceSynchronize());
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char **argv)
{
size_t size;
long grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
long total_iterations = 60;
long pyramid_height = 1; // number of iterations
struct timeval tv_start, tv_end;
double kernel_time = 0; // in ms
double writefile_time = 0; // in ms
double readfile_time = 0; // in ms
double d2h_memcpy_time = 0; // in ms
double h2d_memcpy_time = 0; // in ms
if (argc != 7)
usage(argc, argv);
if((grid_rows = atol(argv[1]))<=0||
(grid_cols = atol(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile = argv[4];
pfile = argv[5];
ofile = argv[6];
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 cells along each border
long borderCols = (pyramid_height)*EXPAND_RATE/2;
long borderRows = (pyramid_height)*EXPAND_RATE/2;
long smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
long smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
long blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
long blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *)malloc(size * sizeof(float));
FilesavingPower = (float *)malloc(size * sizeof(float));
MatrixOut = (float *)calloc(size, sizeof(float));
if(!FilesavingPower || !FilesavingTemp || !MatrixOut)
{
fprintf(stderr, "unable to allocate memory\n");
abort();
exit(EXIT_FAILURE);
}
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
gettimeofday(&tv_start, NULL);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
gettimeofday(&tv_end, NULL);
readfile_time += time_diff(tv_start, tv_end);
float *MatrixTemp[2], *MatrixPower;
CUDA_CALL_SAFE(cudaMalloc((void **)&MatrixTemp[0], sizeof(float) * size));
CUDA_CALL_SAFE(cudaMalloc((void **)&MatrixTemp[1], sizeof(float) * size));
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size, cudaMemcpyHostToDevice));
gettimeofday(&tv_end, NULL);
h2d_memcpy_time += time_diff(tv_start, tv_end);
CUDA_CALL_SAFE(cudaMalloc((void **)&MatrixPower, sizeof(float) * size));
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size, cudaMemcpyHostToDevice));
gettimeofday(&tv_end, NULL);
h2d_memcpy_time += time_diff(tv_start, tv_end);
printf("Start computing the transient temperature\n");
gettimeofday(&tv_start, NULL);
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
gettimeofday(&tv_end, NULL);
kernel_time += time_diff(tv_start, tv_end);
printf("Ending simulation\n");
gettimeofday(&tv_start, NULL);
CUDA_CALL_SAFE(cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size, cudaMemcpyDeviceToHost));
gettimeofday(&tv_end, NULL);
d2h_memcpy_time += time_diff(tv_start, tv_end);
gettimeofday(&tv_start, NULL);
writeoutput(MatrixOut, grid_rows, grid_cols, ofile);
gettimeofday(&tv_end, NULL);
writefile_time += time_diff(tv_start, tv_end);
CUDA_CALL_SAFE(cudaFree(MatrixPower));
CUDA_CALL_SAFE(cudaFree(MatrixTemp[0]));
CUDA_CALL_SAFE(cudaFree(MatrixTemp[1]));
free(MatrixOut);
printf("==> header: kernel_time (ms),writefile_time (ms),d2h_memcpy_time (ms),readfile_time (ms),h2d_memcpy_time (ms)\n");
printf("==> data: %f,%f,%f,%f,%f\n", kernel_time, writefile_time, d2h_memcpy_time, readfile_time, h2d_memcpy_time);
}
|
dc5683447066bd0dd94b70065983ad34239aff41.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "adam_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
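// Note: the harness below sweeps the 20 launch configurations in blocks_ against the first
// matrix_len entries of matrices_, pads each matrix extent up to a multiple of the chosen
// block shape, and times 1000 back-to-back launches of adam_kernel (defined in the included
// adam_kernel.cu, not shown here) per configuration.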
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float)); // size in bytes; the kernel touches N = XSIZE*YSIZE floats
float *m = NULL;
hipMalloc(&m, XSIZE*YSIZE*sizeof(float));
float *v = NULL;
hipMalloc(&v, XSIZE*YSIZE*sizeof(float));
float B1 = 1;
float B2 = 1;
float rate = 1;
float eps = 1;
int t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
adam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,m,v,B1,B2,rate,eps,t);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
adam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,m,v,B1,B2,rate,eps,t);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
adam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,m,v,B1,B2,rate,eps,t);
}
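// Note: kernel launches are asynchronous and no hipDeviceSynchronize() follows the timed loop,
// so the interval below mainly measures launch/enqueue overhead for the 1000 launches; any
// kernel execution time that happens to overlap the measurement is incidental.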
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | dc5683447066bd0dd94b70065983ad34239aff41.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "adam_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float)); // size in bytes; the kernel touches N = XSIZE*YSIZE floats
float *m = NULL;
cudaMalloc(&m, XSIZE*YSIZE*sizeof(float));
float *v = NULL;
cudaMalloc(&v, XSIZE*YSIZE*sizeof(float));
float B1 = 1;
float B2 = 1;
float rate = 1;
float eps = 1;
int t = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
adam_kernel<<<gridBlock,threadBlock>>>(N,x,m,v,B1,B2,rate,eps,t);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
adam_kernel<<<gridBlock,threadBlock>>>(N,x,m,v,B1,B2,rate,eps,t);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
adam_kernel<<<gridBlock,threadBlock>>>(N,x,m,v,B1,B2,rate,eps,t);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
df526242eac17d4b72a2fd66740b4a1a679e12cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pooling_layer.h"
__global__ void MaxPoolForward(const int nthreads,
const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
float maxval = -FLT_MAX;
int maxidx = -1;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
}
}
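// Note: the flat output index decodes as index = ((n*channels + c)*pooled_height + ph)*pooled_width + pw,
// which is what the modulo/division chain at the top of the loop recovers. As a hypothetical example,
// a 1x3x4x4 input pooled with a 2x2 window and stride 2 yields nthreads = 1*3*2*2 = 12.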
__global__ void AvePoolForward(const int nthreads,
const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
float aveval = 0;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
void PoolingLayer::MaxPoolForward_gpu(const int nthreads, const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data)
{
hipLaunchKernelGGL(( MaxPoolForward), dim3(GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, num, channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
}
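// A minimal call sketch (hypothetical buffer names and shapes, assuming NCHW float buffers already
// resident on the GPU):
// layer.MaxPoolForward_gpu(n*c*ph*pw, d_bottom, n, c, h, w, ph, pw,
// /*kernel*/2, 2, /*stride*/2, 2, /*pad*/0, 0, d_top);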
void PoolingLayer::AvePoolForward_gpu(const int nthreads, const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data)
{
hipLaunchKernelGGL(( AvePoolForward), dim3(GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, num, channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
} | df526242eac17d4b72a2fd66740b4a1a679e12cb.cu | #include "pooling_layer.h"
__global__ void MaxPoolForward(const int nthreads,
const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
float maxval = -FLT_MAX;
int maxidx = -1;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
}
}
__global__ void AvePoolForward(const int nthreads,
const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
float aveval = 0;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
void PoolingLayer::MaxPoolForward_gpu(const int nthreads, const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data)
{
MaxPoolForward<<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS>>>(nthreads, bottom_data, num, channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
}
void PoolingLayer::AvePoolForward_gpu(const int nthreads, const float* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data)
{
AvePoolForward<<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS>>>(nthreads, bottom_data, num, channels,
height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_h, pad_w, top_data);
} |
909a320dd048ba241996474d62f69b6c3e79eefc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/rmsprop.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_rmsprop_update(const int num, T *data, const T *grad,
T *e_sqr_grad, const float lr,
const float decay, const float eps) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
e_sqr_grad[idx] =
e_sqr_grad[idx] * decay + grad[idx] * grad[idx] * (1 - decay);
data[idx] -= lr * grad[idx] / (sqrt(e_sqr_grad[idx]) + eps);
}
}
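// The update above is the standard RMSprop rule, applied elementwise:
// E[g^2] <- decay * E[g^2] + (1 - decay) * g^2
// w <- w - lr * g / (sqrt(E[g^2]) + eps)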
template <typename T>
void RMSpropCuda<T>::update_impl(const string &key, VariablePtr param) {
Size_t size = param->size();
VariablePtr state = this->state_.at(key);
T *e_sqr_grad = state->cast_data_and_get_pointer<T>(this->ctx_);
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_rmsprop_update, size, data, grad,
e_sqr_grad, this->lr_, this->decay_,
this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(RMSpropCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(RMSpropCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(RMSpropCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(RMSpropCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(RMSpropCuda, scale_grad_impl_cuda);
}
| 909a320dd048ba241996474d62f69b6c3e79eefc.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/rmsprop.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_rmsprop_update(const int num, T *data, const T *grad,
T *e_sqr_grad, const float lr,
const float decay, const float eps) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
e_sqr_grad[idx] =
e_sqr_grad[idx] * decay + grad[idx] * grad[idx] * (1 - decay);
data[idx] -= lr * grad[idx] / (sqrt(e_sqr_grad[idx]) + eps);
}
}
template <typename T>
void RMSpropCuda<T>::update_impl(const string &key, VariablePtr param) {
Size_t size = param->size();
VariablePtr state = this->state_.at(key);
T *e_sqr_grad = state->cast_data_and_get_pointer<T>(this->ctx_);
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_rmsprop_update, size, data, grad,
e_sqr_grad, this->lr_, this->decay_,
this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(RMSpropCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(RMSpropCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(RMSpropCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(RMSpropCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(RMSpropCuda, scale_grad_impl_cuda);
}
|
cbacc721669259253c7bab82f812a32e33e9b119.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void findMaxIndMultipleDetector(float *input, int* maxInd, int size)
{
int maxIndex = 0;
int count = 1;
for (int i = 1; i < size; i++){
if (input[maxIndex] < input[i]){
maxIndex = i;
count = 1;
}
else if (input[maxIndex] == input[i]){
count++;
}
}
if(count>1)
maxInd[0] = -1;
else
maxInd[0] = maxIndex;
} | cbacc721669259253c7bab82f812a32e33e9b119.cu | #include "includes.h"
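// Note: the kernel scans the whole array from a single thread (no parallel reduction), so a launch
// would use one thread in one block, e.g. (hypothetical buffer names)
// hipLaunchKernelGGL(findMaxIndMultipleDetector, dim3(1), dim3(1), 0, 0, d_input, d_maxInd, n);
// maxInd[0] == -1 signals that the maximum value is not unique.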
__global__ void findMaxIndMultipleDetector(float *input, int* maxInd, int size)
{
int maxIndex = 0;
int count = 1;
for (int i = 1; i < size; i++){
if (input[maxIndex] < input[i]){
maxIndex = i;
count = 1;
}
else if (input[maxIndex] == input[i]){
count++;
}
}
if(count>1)
maxInd[0] = -1;
else
maxInd[0] = maxIndex;
} |
a196dcb9f119d44a0b162c8d6c8948d40d4b6e1e.hip | // !!! This is a file automatically generated by hipify!!!
//------------------------------------------------------------------------
/*
Authors:
Michał Żoczek
Paweł Lipiór
*/
//-----------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <limits>
#include <iostream>
#include <hip/hip_runtime.h>
#include "Matrix.h"
#include "CudaFunctions.h"
#include "MatrixCalculator.h"
int main(int argc, char **argv)
{
MatrixCalculator Calculator; //Matrix Calculator manages all operations on matrices
std::cout<<"DEMO OF COMPUTING PROGRAM"<<std::endl;
//Simple interface to set all parameters and manage program
char operation = '1';
while (operation != '0')
{
std::cout<<"Choose operation: \n"
"0 - Exit\n"
"1 - Addition\n"
"2 - Subtraction\n"
"3 - Multiplication\n"
"4 - Transposition\n"
"5 - VectorSum\n"
<<std::endl;
if (!std::cin)
{
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
//Get information from user
std::cin>>operation;
std::cout<<std::endl;
//Case statement to menu management
switch(operation)
{
case '0':
break;
case '1':
Calculator.MatrixAddition();
break;
case '2':
Calculator.MatrixSubtraction();
break;
case '3':
Calculator.MatrixMultiplication();
break;
case '4':
Calculator.MatrixTransposition();
break;
case '5':
Calculator.VectorSummation();
break;
default:
printf("Wrong input\n");
}
}
}
| a196dcb9f119d44a0b162c8d6c8948d40d4b6e1e.cu | //------------------------------------------------------------------------
/*
Authors:
Michał Żoczek
Paweł Lipiór
*/
//-----------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <limits>
#include <iostream>
#include <cuda_runtime.h>
#include "Matrix.h"
#include "CudaFunctions.h"
#include "MatrixCalculator.h"
int main(int argc, char **argv)
{
MatrixCalculator Calculator; //Matrix Calculator manages all operations on matrices
std::cout<<"DEMO OF COMPUTING PROGRAM"<<std::endl;
//Simple interface to set all parameters and manage program
char operation = '1';
while (operation != '0')
{
std::cout<<"Choose operation: \n"
"0 - Exit\n"
"1 - Addition\n"
"2 - Subtraction\n"
"3 - Multiplication\n"
"4 - Transposition\n"
"5 - VectorSum\n"
<<std::endl;
if (!std::cin)
{
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
//Get information from user
std::cin>>operation;
std::cout<<std::endl;
//Case statement to menu management
switch(operation)
{
case '0':
break;
case '1':
Calculator.MatrixAddition();
break;
case '2':
Calculator.MatrixSubtraction();
break;
case '3':
Calculator.MatrixMultiplication();
break;
case '4':
Calculator.MatrixTransposition();
break;
case '5':
Calculator.VectorSummation();
break;
default:
printf("Wrong input\n");
}
}
}
|
55b7c1b5c21c5ee7136c981403a762347b044a08.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
// Define and implement the GPU addition function
// This version is a vector addition, with N threads
// and one block.
// Adding one a and b instance and storing in one c instance.
__global__ void add(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
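// Note: with a single block of N threads (see the launch below), thread i handles exactly element i.
// An equivalent multi-block form would compute i = blockIdx.x * blockDim.x + threadIdx.x and guard
// with if (i < N); that variant is a sketch only and is not used in this sample.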
// Number of elements (one thread per element in a single block)
#define N 512
int main()
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N* sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Allocate memory for the host a, b, and c arrays
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
// Store known values in the a and b arrays
for (int i = 0; i < N; ++i)
{
a[i] = 10*i;
b[i] = 20*i;
}
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU with N threads on 1 block
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// Print results
for (int i = 0; i < N; ++i)
{
std::cout << "sum[" << i << "] is " << c[i] << std::endl;
}
// Cleanup
free(a);
free(b);
free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| 55b7c1b5c21c5ee7136c981403a762347b044a08.cu | #include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
// Define and implement the GPU addition function
// This version is a vector addition, with N threads
// and one block.
// Adding one a and b instance and storing in one c instance.
__global__ void add(int *a, int *b, int *c)
{
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
// Number of elements (one thread per element in a single block)
#define N 512
int main()
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N* sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Allocate memory for the host a, b, and c arrays
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
// Store known values in the a and b arrays
for (int i = 0; i < N; ++i)
{
a[i] = 10*i;
b[i] = 20*i;
}
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU with N threads on 1 block
add<<<1,N>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// Print results
for (int i = 0; i < N; ++i)
{
std::cout << "sum[" << i << "] is " << c[i] << std::endl;
}
// Cleanup
free(a);
free(b);
free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
c67b5c2d0f4a4f6f1ef124ac1428950f3bf046cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
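// In effect Forward_gpu computes, for every (n, s) location and across channels c,
// top[c] = exp(bottom[c] - max_k bottom[k]) / sum_k exp(bottom[k] - max_k bottom[k]);
// subtracting the channel maximum first is the usual trick for numerical stability.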
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
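// Backward_gpu implements the softmax Jacobian-vector product
// bottom_diff = (top_diff - dot(top_diff, top_data)) * top_data (per channel slice),
// which follows from d softmax_i / d x_j = softmax_i * (delta_ij - softmax_j).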
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| c67b5c2d0f4a4f6f1ef124ac1428950f3bf046cb.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
d23a5733f0859f3d184888d7696b826751909cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is called from the
host (and, on compute capability 3.2 and above, also from the device); it must return void;
calls to it are asynchronous, i.e. they return before the device has finished executing;
every call must supply an execution configuration (the grid and block dimensions used to run
the function on the device, plus the associated stream) via the <<< >>> operator;
"a kernel": a CUDA parallel function that runs on the GPU is called a kernel, and a kernel
must be defined with the __global__ function type qualifier. */
__global__ static void bgr2gray(const unsigned char* src, int B2Y, int G2Y, int R2Y, int shift, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a constant
for all thread blocks and stores the size of each grid dimension, i.e. the number of thread
blocks per dimension; a grid is three-dimensional and of type dim3;
blockDim: built-in variable giving the dimensions and size of each block; of type dim3, it
holds the block extent in all three dimensions; it is a constant for all thread blocks and
stores the number of threads per block dimension;
blockIdx: built-in variable holding the index of the thread block currently executing this
device code, i.e. the position of the current thread's block within the grid; blockIdx.x
ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3, it carries
the block's index in every grid dimension;
threadIdx: built-in variable holding the index of the thread currently executing this device
code, i.e. its position within the block; threadIdx.x for one-dimensional blocks, plus
threadIdx.y for two-dimensional and threadIdx.z for three-dimensional blocks; of type uint3,
it carries the thread's index in every block dimension */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d, %d, %d, %d, %d\n", width, height, B2Y, G2Y, R2Y, shift);
//}
if (x < width && y < height) {
dst[y * width + x] = (unsigned char)((src[y*width * 3 + 3 * x + 0] * B2Y +
src[y*width * 3 + 3 * x + 1] * G2Y + src[y*width * 3 + 3 * x + 2] * R2Y) >> shift);
}
}
int bgr2gray_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int R2Y{ 4899 }, G2Y{ 9617 }, B2Y{ 1868 }, yuv_shift{ 14 };
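// These are the BT.601 luma weights in Q14 fixed point: 0.299*2^14 ~= 4899, 0.587*2^14 ~= 9617,
// 0.114*2^14 ~= 1868, so the right shift by yuv_shift = 14 in the kernel converts the weighted
// B/G/R sum back to an 8-bit gray value.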
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// hipMalloc: allocate memory on the device
hipMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
hipMalloc(&dev_dst, width * height * sizeof(unsigned char));
/* hipMemcpy: copies data between host and device; the fourth argument must be one of:
(1). hipMemcpyHostToHost: copy from host to host
(2). hipMemcpyHostToDevice: copy from host to device
(3). hipMemcpyDeviceToHost: copy from device to host
(4). hipMemcpyDeviceToDevice: copy from device to device
(5). hipMemcpyDefault: infer the direction from the pointer values; requires unified
virtual addressing (CUDA 6.0 and above)
hipMemcpy is synchronous with respect to the host */
hipMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), hipMemcpyHostToDevice);
/* hipMemset: memory initialization function that runs on GPU memory; initializes or sets
device memory to the given value */
hipMemset(dev_dst, 0, width * height * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned ints;
it can describe a three-dimensional extent, and any component not explicitly set when a dim3
variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must be <= 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
/* <<< >>>: operator introduced by CUDA to specify the grid and block dimensions and other
execution parameters; it passes launch parameters to the CUDA compiler and runtime, stating
how many threads run the kernel and how they are organized; the arguments inside the angle
brackets are not passed to the device code, they tell the runtime how to launch it, while the
kernel's own arguments are passed in parentheses just like a normal function call; devices of
different compute capabilities place different limits on the total thread count and layout;
all arrays and variables used inside the kernel must be allocated before the kernel is called,
otherwise errors such as out-of-bounds accesses occur during GPU computation;
with the runtime API the execution configuration is written between the kernel name and the
argument list as <<<Dg,Db,Ns,S>>>, where: Dg is a dim3 variable setting the grid dimensions
and sizes, giving Dg.x*Dg.y*Dg.z blocks in the grid; Db is a dim3 variable setting the block
dimensions and sizes, giving Db.x*Db.y*Db.z threads per block; Ns is a size_t giving the
amount of shared memory dynamically allocated per block for this call, usable by variables
declared as external arrays (extern __shared__); Ns is optional and defaults to 0; S is of
type cudaStream_t and selects the stream associated with the kernel; S is optional and defaults to 0. */
// Note: a kernel cannot take a std::vector's data() pointer directly; hipMalloc and hipMemcpy
// are required because the vector lives in host memory
bgr2gray << <blocks, threads >> >(dev_src, B2Y, G2Y, R2Y, yuv_shift, width, height, dev_dst);
/* hipDeviceSynchronize: kernel launches are asynchronous; to locate launch errors it is
common to add a hipDeviceSynchronize call, which blocks until all previously requested work
has completed and returns an error if any of that work failed; when a program uses several
streams that need to communicate at some point, a synchronization statement
(hipDeviceSynchronize) is required at that point; launches remain asynchronous
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
hipDeviceSynchronize();
TIME_END_GPU
hipMemcpy(dst, dev_dst, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost);
// hipFree: free device memory allocated with hipMalloc
hipFree(dev_dst);
hipFree(dev_src);
return 0;
}
| d23a5733f0859f3d184888d7696b826751909cbd.cu | #include "funset.hpp"
#include <iostream>
#include <chrono>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is called from the
host (and, on compute capability 3.2 and above, also from the device); it must return void;
calls to it are asynchronous, i.e. they return before the device has finished executing;
every call must supply an execution configuration (the grid and block dimensions used to run
the function on the device, plus the associated stream) via the <<< >>> operator;
"a kernel": a CUDA parallel function that runs on the GPU is called a kernel, and a kernel
must be defined with the __global__ function type qualifier. */
__global__ static void bgr2gray(const unsigned char* src, int B2Y, int G2Y, int R2Y, int shift, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a constant
for all thread blocks and stores the size of each grid dimension, i.e. the number of thread
blocks per dimension; a grid is three-dimensional and of type dim3;
blockDim: built-in variable giving the dimensions and size of each block; of type dim3, it
holds the block extent in all three dimensions; it is a constant for all thread blocks and
stores the number of threads per block dimension;
blockIdx: built-in variable holding the index of the thread block currently executing this
device code, i.e. the position of the current thread's block within the grid; blockIdx.x
ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3, it carries
the block's index in every grid dimension;
threadIdx: built-in variable holding the index of the thread currently executing this device
code, i.e. its position within the block; threadIdx.x for one-dimensional blocks, plus
threadIdx.y for two-dimensional and threadIdx.z for three-dimensional blocks; of type uint3,
it carries the thread's index in every block dimension */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d, %d, %d, %d, %d\n", width, height, B2Y, G2Y, R2Y, shift);
//}
if (x < width && y < height) {
dst[y * width + x] = (unsigned char)((src[y*width * 3 + 3 * x + 0] * B2Y +
src[y*width * 3 + 3 * x + 1] * G2Y + src[y*width * 3 + 3 * x + 2] * R2Y) >> shift);
}
}
int bgr2gray_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int R2Y{ 4899 }, G2Y{ 9617 }, B2Y{ 1868 }, yuv_shift{ 14 };
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// cudaMalloc: allocate memory on the device
cudaMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
cudaMalloc(&dev_dst, width * height * sizeof(unsigned char));
/* cudaMemcpy: copies data between host and device; the fourth argument must be one of:
(1). cudaMemcpyHostToHost: copy from host to host
(2). cudaMemcpyHostToDevice: copy from host to device
(3). cudaMemcpyDeviceToHost: copy from device to host
(4). cudaMemcpyDeviceToDevice: copy from device to device
(5). cudaMemcpyDefault: infer the direction from the pointer values; requires unified
virtual addressing (CUDA 6.0 and above)
cudaMemcpy is synchronous with respect to the host */
cudaMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
/* cudaMemset: memory initialization function that runs on GPU memory; initializes or sets
device memory to the given value */
cudaMemset(dev_dst, 0, width * height * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned ints;
it can describe a three-dimensional extent, and any component not explicitly set when a dim3
variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must be <= 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
/* <<< >>>: operator introduced by CUDA to specify the grid and block dimensions and other
execution parameters; it passes launch parameters to the CUDA compiler and runtime, stating
how many threads run the kernel and how they are organized; the arguments inside the angle
brackets are not passed to the device code, they tell the runtime how to launch it, while the
kernel's own arguments are passed in parentheses just like a normal function call; devices of
different compute capabilities place different limits on the total thread count and layout;
all arrays and variables used inside the kernel must be allocated before the kernel is called,
otherwise errors such as out-of-bounds accesses occur during GPU computation;
with the runtime API the execution configuration is written between the kernel name and the
argument list as <<<Dg,Db,Ns,S>>>, where: Dg is a dim3 variable setting the grid dimensions
and sizes, giving Dg.x*Dg.y*Dg.z blocks in the grid; Db is a dim3 variable setting the block
dimensions and sizes, giving Db.x*Db.y*Db.z threads per block; Ns is a size_t giving the
amount of shared memory dynamically allocated per block for this call, usable by variables
declared as external arrays (extern __shared__); Ns is optional and defaults to 0; S is of
type cudaStream_t and selects the stream associated with the kernel; S is optional and defaults to 0. */
// Note: a kernel cannot take a std::vector's data() pointer directly; cudaMalloc and cudaMemcpy
// are required because the vector lives in host memory
bgr2gray << <blocks, threads >> >(dev_src, B2Y, G2Y, R2Y, yuv_shift, width, height, dev_dst);
/* cudaDeviceSynchronize: kernel launches are asynchronous; to locate launch errors it is
common to add a cudaDeviceSynchronize call, which blocks until all previously requested work
has completed and returns an error if any of that work failed; when a program uses several
streams that need to communicate at some point, a synchronization statement
(cudaDeviceSynchronize) is required at that point; launches remain asynchronous
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
cudaDeviceSynchronize();
TIME_END_GPU
cudaMemcpy(dst, dev_dst, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost);
// cudaFree: free device memory allocated with cudaMalloc
cudaFree(dev_dst);
cudaFree(dev_src);
return 0;
}
|
8cd66d80501f11673d4968aa0ac0f2a7891befbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include<iostream>
#include "gloveparser.cuh"
#include <stdio.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "point.h"
#include <time.h>
const int MAX_THREADS = 1024;
int calculateThreads(int querypoints) {
if (querypoints < MAX_THREADS) return querypoints;
else return MAX_THREADS;
}
int calculateBlocks(int querypoints) {
if (querypoints < MAX_THREADS) return 1;
else return ceil(querypoints / (float)MAX_THREADS);
}
__global__
void add(int n_data, int n_query, int dimensions, int k, float *queryPoints, float *dataPoints, Point *results) {
int queryIndex = blockIdx.x *blockDim.x + threadIdx.x;
if (queryIndex < n_query) {
float dotProduct;
int index = queryIndex * dimensions;
int result_index = queryIndex * k;
//#pragma unroll;
for (int i = 0; i < n_data; i++) {
float dotProduct = 0;
/*for (int j = 0; j < dimensions; j++) {
dotProduct += queryPoints[index + j] * dataPoints[dimensions*i + j];
}*/
float angular_distance = -(i);
Point currentPoint;
currentPoint.ID = i;
currentPoint.distance = angular_distance;
Point swapPoint;
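// The loop below keeps the k best (smallest-distance) candidates for this query in
// results[result_index .. result_index+k-1], sorted in ascending order: currentPoint
// is bubbled into place and the displaced entry becomes the new candidate.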
for (int j = 0; (j < k && j <= i); j++) { // simple sorting.
if (results[result_index + j].distance > currentPoint.distance) {
swapPoint = results[result_index + j];
results[result_index + j] = currentPoint;
currentPoint = swapPoint;
}
}
}
}
}
Point* runSimpleLinearScan(int k, int d, int N_query, int N_data, float* data, float* queries) {
//Data-specific elements, read from the data files.
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Is there a CUDA-capable GPU installed?");
throw "Error in simpleLinearScan run.";
}
Point *z;
z = (Point*)malloc(k*N_query * sizeof(Point));
for (int i = 0; i < k * N_query; i++) {
Point p;
p.ID = -1;
p.distance = 2.0f; //fill z array with default max value - given sim [-1,1]
z[i] = p;
}
float* dev_x = 0;
float* dev_y = 0;
Point* dev_z = 0;
hipMalloc((void**)&dev_x, N_query * d * sizeof(float));
hipMalloc((void**)&dev_y, N_data * d * sizeof(float));
hipMalloc((void**)&dev_z, k * N_query * sizeof(Point));
hipMemcpy(dev_x, queries, N_query * d * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_y, data, N_data * d * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_z, z, k * N_query * sizeof(Point), hipMemcpyHostToDevice);
// initialize x and y arrays on the host
int threads = calculateThreads(N_query);
int blocks = calculateBlocks(N_query);
printf("Threads: %d\n", threads);
printf("Blocks: %d\n", blocks);
clock_t before = clock();
add << <blocks, threads >> > (N_data, N_query, d, k, dev_x, dev_y, dev_z);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
throw "Error in simpleLinearScan run.";
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
throw "Error in simpleLinearScan run.";
}
clock_t time_lapsed = clock() - before;
printf("Time calculate on the GPU: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC));
cudaStatus = hipMemcpy(z, dev_z, k * N_query * sizeof(Point), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cuda memcpy from device to host returned error code %d \n", cudaStatus);
throw "Error in simpleLinearScan run.";
}
for (int i = 0; i < 4; i++) {
printf("Query: %d\n", i);
for (int j = 0; j < k; j++) {
printf("ID: %d dist: %f\n", z[j + i*k].ID, z[j +i*k].distance);
}
}
printf("Done. \n");
//Free memory...
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_z);
cudaStatus = hipDeviceReset();
return z;
} | 8cd66d80501f11673d4968aa0ac0f2a7891befbc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include<iostream>
#include "gloveparser.cuh"
#include <stdio.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "point.h"
#include <time.h>
const int MAX_THREADS = 1024;
int calculateThreads(int querypoints) {
if (querypoints < MAX_THREADS) return querypoints;
else return MAX_THREADS;
}
int calculateBlocks(int querypoints) {
if (querypoints < MAX_THREADS) return 1;
else return ceil(querypoints / (float)MAX_THREADS);
}
__global__
void add(int n_data, int n_query, int dimensions, int k, float *queryPoints, float *dataPoints, Point *results) {
int queryIndex = blockIdx.x *blockDim.x + threadIdx.x;
if (queryIndex < n_query) {
float dotProduct;
int index = queryIndex * dimensions;
int result_index = queryIndex * k;
//#pragma unroll;
for (int i = 0; i < n_data; i++) {
float dotProduct = 0;
/*for (int j = 0; j < dimensions; j++) {
dotProduct += queryPoints[index + j] * dataPoints[dimensions*i + j];
}*/
float angular_distance = -(i);
Point currentPoint;
currentPoint.ID = i;
currentPoint.distance = angular_distance;
Point swapPoint;
for (int j = 0; (j < k && j <= i); j++) { // simple sorting.
if (results[result_index + j].distance > currentPoint.distance) {
swapPoint = results[result_index + j];
results[result_index + j] = currentPoint;
currentPoint = swapPoint;
}
}
}
}
}
Point* runSimpleLinearScan(int k, int d, int N_query, int N_data, float* data, float* queries) {
//Data-specific elements, read from the data files.
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Is there a CUDA-capable GPU installed?");
throw "Error in simpleLinearScan run.";
}
Point *z;
z = (Point*)malloc(k*N_query * sizeof(Point));
for (int i = 0; i < k * N_query; i++) {
Point p;
p.ID = -1;
p.distance = 2.0f; //fill z array with default max value - given sim [-1,1]
z[i] = p;
}
float* dev_x = 0;
float* dev_y = 0;
Point* dev_z = 0;
cudaMalloc((void**)&dev_x, N_query * d * sizeof(float));
cudaMalloc((void**)&dev_y, N_data * d * sizeof(float));
cudaMalloc((void**)&dev_z, k * N_query * sizeof(Point));
cudaMemcpy(dev_x, queries, N_query * d * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, data, N_data * d * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_z, z, k * N_query * sizeof(Point), cudaMemcpyHostToDevice);
// initialize x and y arrays on the host
int threads = calculateThreads(N_query);
int blocks = calculateBlocks(N_query);
printf("Threads: %d\n", threads);
printf("Blocks: %d\n", blocks);
clock_t before = clock();
add << <blocks, threads >> > (N_data, N_query, d, k, dev_x, dev_y, dev_z);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
throw "Error in simpleLinearScan run.";
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
throw "Error in simpleLinearScan run.";
}
clock_t time_lapsed = clock() - before;
printf("Time calculate on the GPU: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC));
cudaStatus = cudaMemcpy(z, dev_z, k * N_query * sizeof(Point), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cuda memcpy from device to host returned error code %d \n", cudaStatus);
throw "Error in simpleLinearScan run.";
}
for (int i = 0; i < 4; i++) {
printf("Query: %d\n", i);
for (int j = 0; j < k; j++) {
printf("ID: %d dist: %f\n", z[j + i*k].ID, z[j +i*k].distance);
}
}
printf("Done. \n");
//Free memory...
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_z);
cudaStatus = cudaDeviceReset();
return z;
} |
1cf0d35de21ed00f3b3f622159535b1eee036f73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_math.h"
#include "boundary.h"
/*
* The L-versions of the RHS have to be run with
* - the L-version of the derivatives
* i.e.: derDev1xL instead of derDev1x
* - the L-version of the grid
* i.e.: h_gridL[0] instead of h_grid[0]
*/
/* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX_old thanks to the beneficial memory layout that allows the use of small pencils */
/* For the Y and Z directions, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils, which speeds
 * up the computation significantly. Therefore 5 streams are used
* stream 0 -> complete X RHS (in RHSDeviceSharedFlxX_old) (small pencil grid)
* stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid)
* stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid)
* stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid)
* stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/
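/* Added illustrative sketch (host-side pseudocode, not part of the original file): the
 * five-stream decomposition described above would be driven roughly as
 *   RHSDeviceSharedFlxX<<<gridX,  blockX,  0, s[0]>>>(...);   // stream 0, small pencils
 *   RHSDeviceFullYL / RHSDeviceFullZL launched on s[1] / s[2]; // large pencils
 *   FLXDeviceY / FLXDeviceZ launched on s[3] / s[4];           // transposed small pencils
 * followed by a device-wide synchronization before the accumulated RHS is consumed.
 * Grid/block shapes and the stream array s[] are assumptions made for this sketch. */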
__global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidX();
int si = id.i + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rXtmp=0;
myprec uXtmp=0;
myprec vXtmp=0;
myprec wXtmp=0;
myprec eXtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mx+stencilSize*2];
__shared__ myprec s_u[sPencils][mx+stencilSize*2];
__shared__ myprec s_v[sPencils][mx+stencilSize*2];
__shared__ myprec s_w[sPencils][mx+stencilSize*2];
__shared__ myprec s_h[sPencils][mx+stencilSize*2];
__shared__ myprec s_t[sPencils][mx+stencilSize*2];
__shared__ myprec s_p[sPencils][mx+stencilSize*2];
__shared__ myprec s_m[sPencils][mx+stencilSize*2];
__shared__ myprec s_l[sPencils][mx+stencilSize*2];
#if !periodicX
__shared__ myprec s_s0[sPencils][mx+stencilSize*2];
__shared__ myprec s_s4[sPencils][mx+stencilSize*2];
__shared__ myprec s_s8[sPencils][mx+stencilSize*2];
#endif
__shared__ myprec s_dil[sPencils][mx+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
#if !periodicX
s_s0[sj][si]= sij[0][id.g];
s_s4[sj][si]= sij[4][id.g];
s_s8[sj][si]= sij[8][id.g];
#endif
s_dil[sj][si] = dil[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.i < stencilSize) {
#if periodicX
perBCx(s_r[sj],si); perBCx(s_u[sj],si);
perBCx(s_v[sj],si); perBCx(s_w[sj],si);
perBCx(s_h[sj],si); perBCx(s_t[sj],si);
perBCx(s_p[sj],si); perBCx(s_m[sj],si);
perBCx(s_l[sj],si);
#else
wallBCxMir(s_p[sj],si);
wallBCxVel(s_u[sj],si); wallBCxVel(s_v[sj],si); wallBCxVel(s_w[sj],si);
wallBCxExt(s_t[sj],si,1.0,1.0);
stateBoundPT(s_r[sj], s_t[sj], s_u[sj], s_v[sj], s_w[sj], s_h[sj], s_p[sj], s_m[sj], s_l[sj], si);
wallBCxMir(s_s0[sj],si); wallBCxVel(s_s4[sj],si); wallBCxVel(s_s8[sj],si);
#endif
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uXtmp = ( 2 * sij[0][id.g] - 2./3.*s_dil[sj][si] );
vXtmp = ( sij[1][id.g] + sij[3][id.g] );
wXtmp = ( sij[2][id.g] + sij[6][id.g] );
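/* Added note: assuming sij[] stores the velocity-gradient components du_i/dx_j, the three
   lines above build the x-row of the viscous stress tensor divided by mu,
   tau_xj/mu = (du_x/dx_j + du_j/dx) - 2/3 * (div u) * delta_xj,
   i.e. uXtmp = tau_xx/mu, vXtmp = tau_xy/mu, wXtmp = tau_xz/mu; they are later scaled by
   d(mu)/dx and combined with the mu * d2u_i/dx2 terms below. */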
//adding the viscous dissipation part duidx*mu*six
eXtmp = s_m[sj][si]*(uXtmp*sij[0][id.g] + vXtmp*sij[1][id.g] + wXtmp*sij[2][id.g]);
//Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse);
derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uXtmp *= wrk2;
vXtmp *= wrk2;
wXtmp *= wrk2;
// viscous fluxes derivative mu*d^2ui dx^2
derDevSharedV2x(&wrk1,s_u[sj],si);
uXtmp = uXtmp + wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_v[sj],si);
vXtmp = vXtmp + wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_w[sj],si);
wXtmp = wXtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidx2 + dmudx * six)
eXtmp = eXtmp + s_u[sj][si]*uXtmp + s_v[sj][si]*vXtmp + s_w[sj][si]*wXtmp;
//adding the molecular conduction part (d2 temp dx2*lambda + dlambda dx * d temp dx)
derDevSharedV2x(&wrk1,s_t[sj],si);
eXtmp = eXtmp + wrk1*s_l[sj][si];
derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eXtmp = eXtmp + wrk1*wrk2;
//Adding here the terms - d (ru phi) dx;
fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si);
rXtmp = wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si);
uXtmp = uXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si);
vXtmp = vXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si);
wXtmp = wXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si);
eXtmp = eXtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
if (id.i < stencilSize) {
#if periodicX
perBCx(s_dil[sj],si);
#else
wallBCxDil(s_dil[sj],s_s0[sj],s_s4[sj],s_s8[sj],si);
#endif
}
__syncthreads();
derDevSharedV1x(&wrk2,s_dil[sj],si);
derDevShared1x(&wrk1 ,s_p[sj],si);
uXtmp = uXtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eXtmp = eXtmp + s_m[sj][si]*wrk2/3.0*s_u[sj][si];
rX[id.g] = rXtmp;
uX[id.g] = uXtmp;
vX[id.g] = vXtmp;
wX[id.g] = wXtmp;
eX[id.g] = eXtmp ;
}
__global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rYtmp=0;
myprec uYtmp=0;
myprec vYtmp=0;
myprec wYtmp=0;
myprec eYtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
__shared__ myprec s_t[sPencils][my+stencilSize*2];
__shared__ myprec s_p[sPencils][my+stencilSize*2];
__shared__ myprec s_m[sPencils][my+stencilSize*2];
__shared__ myprec s_l[sPencils][my+stencilSize*2];
__shared__ myprec s_s3[sPencils][my+stencilSize*2];
__shared__ myprec s_s4[sPencils][my+stencilSize*2];
__shared__ myprec s_s5[sPencils][my+stencilSize*2];
__shared__ myprec s_dil[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
s_dil[sj][si] = dil[id.g];
s_s3[sj][si] = sij[3][id.g];
s_s4[sj][si] = sij[4][id.g];
s_s5[sj][si] = sij[5][id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
perBCy(s_r[sj],si); perBCy(s_u[sj],si);
perBCy(s_v[sj],si); perBCy(s_w[sj],si);
perBCy(s_h[sj],si); perBCy(s_t[sj],si);
perBCy(s_p[sj],si); perBCy(s_m[sj],si);
perBCy(s_l[sj],si);
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uYtmp = ( s_s3[sj][si] + sij[1][id.g] ) ;
vYtmp = ( 2 * s_s4[sj][si] - 2./3.*s_dil[sj][si] ) ;
wYtmp = ( s_s5[sj][si] + sij[7][id.g] ) ;
//adding the viscous dissipation part duidy*mu*siy
eYtmp = s_m[sj][si]*(uYtmp*s_s3[sj][si] + vYtmp*s_s4[sj][si] + wYtmp*s_s5[sj][si]);
//Adding here the terms d (mu) dy * siy;
derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uYtmp *= wrk2;
vYtmp *= wrk2;
wYtmp *= wrk2;
// viscous fluxes derivative mu*d^2dui dy^2
derDevSharedV2y(&wrk1,s_u[sj],si);
uYtmp = uYtmp + wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_v[sj],si);
vYtmp = vYtmp + wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_w[sj],si);
wYtmp = wYtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy)
eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp;
derDevSharedV2y(&wrk1,s_t[sj],si);
eYtmp = eYtmp + wrk1*s_l[sj][si];
derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eYtmp = eYtmp + wrk1*wrk2;
// split advection terms
//Adding here the terms - d (ru phi) dy;
fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si);
rYtmp = wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si);
uYtmp = uYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si);
vYtmp = vYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si);
wYtmp = wYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si);
eYtmp = eYtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
if (id.j < stencilSize) {
perBCy(s_dil[sj],si);
}
__syncthreads();
derDevSharedV1y(&wrk2,s_dil[sj],si);
derDevShared1y(&wrk1,s_p[sj],si);
vYtmp = vYtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eYtmp = eYtmp + s_m[sj][si]*wrk2/3.0*s_v[sj][si];
#if useStreams
rY[id.g] = rYtmp;
uY[id.g] = uYtmp;
vY[id.g] = vYtmp;
wY[id.g] = wYtmp;
eY[id.g] = eYtmp;
#else
rY[id.g] += rYtmp;
uY[id.g] += uYtmp;
vY[id.g] += vYtmp;
wY[id.g] += wYtmp;
eY[id.g] += eYtmp;
#endif
}
__global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rZtmp=0;
myprec uZtmp=0;
myprec vZtmp=0;
myprec wZtmp=0;
myprec eZtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
__shared__ myprec s_t[sPencils][mz+stencilSize*2];
__shared__ myprec s_p[sPencils][mz+stencilSize*2];
__shared__ myprec s_m[sPencils][mz+stencilSize*2];
__shared__ myprec s_l[sPencils][mz+stencilSize*2];
__shared__ myprec s_s6[sPencils][mz+stencilSize*2];
__shared__ myprec s_s7[sPencils][mz+stencilSize*2];
__shared__ myprec s_s8[sPencils][mz+stencilSize*2];
__shared__ myprec s_dil[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
s_s6[sj][si] = sij[6][id.g];
s_s7[sj][si] = sij[7][id.g];
s_s8[sj][si] = sij[8][id.g];
s_dil[sj][si] = dil[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
perBCz(s_r[sj],si); perBCz(s_u[sj],si);
perBCz(s_v[sj],si); perBCz(s_w[sj],si);
perBCz(s_h[sj],si); perBCz(s_t[sj],si);
perBCz(s_p[sj],si); perBCz(s_m[sj],si);
perBCz(s_l[sj],si);
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uZtmp = ( s_s6[sj][si] + sij[2][id.g] );
vZtmp = ( s_s7[sj][si] + sij[5][id.g] );
wZtmp = (2 * s_s8[sj][si] - 2./3.*s_dil[sj][si] );
//adding the viscous dissipation part duidz*mu*siz
eZtmp = s_m[sj][si]*(uZtmp*s_s6[sj][si] + vZtmp*s_s7[sj][si] + wZtmp*s_s8[sj][si]);
//Adding here the terms d (mu) dz * szj;
derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz
uZtmp *= wrk2;
vZtmp *= wrk2;
wZtmp *= wrk2;
// viscous fluxes derivative
derDevSharedV2z(&wrk1,s_u[sj],si);
uZtmp = uZtmp + wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_v[sj],si);
vZtmp = vZtmp + wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_w[sj],si);
wZtmp = wZtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz)
derDevSharedV2z(&wrk1,s_t[sj],si);
eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp + wrk1*s_l[sj][si];
derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz
derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eZtmp = eZtmp + wrk1*wrk2;
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si);
rZtmp = wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si);
uZtmp = uZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si);
vZtmp = vZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si);
wZtmp = wZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si);
eZtmp = eZtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
__syncthreads();
if (id.k < stencilSize) {
perBCz(s_dil[sj],si);
}
__syncthreads();
derDevSharedV1z(&wrk2,s_dil[sj],si);
derDevShared1z(&wrk1,s_p[sj],si);
wZtmp = wZtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eZtmp = eZtmp + s_m[sj][si]*wrk2/3.0*s_w[sj][si];
#if useStreams
rZ[id.g] = rZtmp;
uZ[id.g] = uZtmp;
vZ[id.g] = vZtmp;
wZ[id.g] = wZtmp + dpdz;
eZ[id.g] = eZtmp + dpdz*s_w[sj][si] ;
#else
rZ[id.g] += rZtmp;
uZ[id.g] += uZtmp;
vZ[id.g] += vZtmp;
wZ[id.g] += wZtmp + dpdz;
eZ[id.g] += eZtmp + dpdz*s_w[sj][si] ;
#endif
__syncthreads();
}
| 1cf0d35de21ed00f3b3f622159535b1eee036f73.cu |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_math.h"
#include "boundary.h"
/*
* The L-versions of the RHS have to be run with
* - the L-version of the derivatives
* i.e.: derDev1xL instead of derDev1x
* - the L-version of the grid
* i.e.: h_gridL[0] instead of h_grid[0]
*/
/* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX_old thanks to the beneficial memory layout that allows the use of small pencils */
/* For the Y and Z directions, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils, which speeds
 * up the computation significantly. Therefore 5 streams are used
* stream 0 -> complete X RHS (in RHSDeviceSharedFlxX_old) (small pencil grid)
* stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid)
* stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid)
* stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid)
* stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/
__global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidX();
int si = id.i + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rXtmp=0;
myprec uXtmp=0;
myprec vXtmp=0;
myprec wXtmp=0;
myprec eXtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mx+stencilSize*2];
__shared__ myprec s_u[sPencils][mx+stencilSize*2];
__shared__ myprec s_v[sPencils][mx+stencilSize*2];
__shared__ myprec s_w[sPencils][mx+stencilSize*2];
__shared__ myprec s_h[sPencils][mx+stencilSize*2];
__shared__ myprec s_t[sPencils][mx+stencilSize*2];
__shared__ myprec s_p[sPencils][mx+stencilSize*2];
__shared__ myprec s_m[sPencils][mx+stencilSize*2];
__shared__ myprec s_l[sPencils][mx+stencilSize*2];
#if !periodicX
__shared__ myprec s_s0[sPencils][mx+stencilSize*2];
__shared__ myprec s_s4[sPencils][mx+stencilSize*2];
__shared__ myprec s_s8[sPencils][mx+stencilSize*2];
#endif
__shared__ myprec s_dil[sPencils][mx+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
#if !periodicX
s_s0[sj][si]= sij[0][id.g];
s_s4[sj][si]= sij[4][id.g];
s_s8[sj][si]= sij[8][id.g];
#endif
s_dil[sj][si] = dil[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.i < stencilSize) {
#if periodicX
perBCx(s_r[sj],si); perBCx(s_u[sj],si);
perBCx(s_v[sj],si); perBCx(s_w[sj],si);
perBCx(s_h[sj],si); perBCx(s_t[sj],si);
perBCx(s_p[sj],si); perBCx(s_m[sj],si);
perBCx(s_l[sj],si);
#else
wallBCxMir(s_p[sj],si);
wallBCxVel(s_u[sj],si); wallBCxVel(s_v[sj],si); wallBCxVel(s_w[sj],si);
wallBCxExt(s_t[sj],si,1.0,1.0);
stateBoundPT(s_r[sj], s_t[sj], s_u[sj], s_v[sj], s_w[sj], s_h[sj], s_p[sj], s_m[sj], s_l[sj], si);
wallBCxMir(s_s0[sj],si); wallBCxVel(s_s4[sj],si); wallBCxVel(s_s8[sj],si);
#endif
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uXtmp = ( 2 * sij[0][id.g] - 2./3.*s_dil[sj][si] );
vXtmp = ( sij[1][id.g] + sij[3][id.g] );
wXtmp = ( sij[2][id.g] + sij[6][id.g] );
//adding the viscous dissipation part duidx*mu*six
eXtmp = s_m[sj][si]*(uXtmp*sij[0][id.g] + vXtmp*sij[1][id.g] + wXtmp*sij[2][id.g]);
//Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse);
derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uXtmp *= wrk2;
vXtmp *= wrk2;
wXtmp *= wrk2;
// viscous fluxes derivative mu*d^2ui dx^2
derDevSharedV2x(&wrk1,s_u[sj],si);
uXtmp = uXtmp + wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_v[sj],si);
vXtmp = vXtmp + wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_w[sj],si);
wXtmp = wXtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidx2 + dmudx * six)
eXtmp = eXtmp + s_u[sj][si]*uXtmp + s_v[sj][si]*vXtmp + s_w[sj][si]*wXtmp;
//adding the molecular conduction part (d2 temp dx2*lambda + dlambda dx * d temp dx)
derDevSharedV2x(&wrk1,s_t[sj],si);
eXtmp = eXtmp + wrk1*s_l[sj][si];
derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eXtmp = eXtmp + wrk1*wrk2;
//Adding here the terms - d (ru phi) dx;
fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si);
rXtmp = wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si);
uXtmp = uXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si);
vXtmp = vXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si);
wXtmp = wXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si);
eXtmp = eXtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
if (id.i < stencilSize) {
#if periodicX
perBCx(s_dil[sj],si);
#else
wallBCxDil(s_dil[sj],s_s0[sj],s_s4[sj],s_s8[sj],si);
#endif
}
__syncthreads();
derDevSharedV1x(&wrk2,s_dil[sj],si);
derDevShared1x(&wrk1 ,s_p[sj],si);
uXtmp = uXtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eXtmp = eXtmp + s_m[sj][si]*wrk2/3.0*s_u[sj][si];
rX[id.g] = rXtmp;
uX[id.g] = uXtmp;
vX[id.g] = vXtmp;
wX[id.g] = wXtmp;
eX[id.g] = eXtmp ;
}
__global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rYtmp=0;
myprec uYtmp=0;
myprec vYtmp=0;
myprec wYtmp=0;
myprec eYtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
__shared__ myprec s_t[sPencils][my+stencilSize*2];
__shared__ myprec s_p[sPencils][my+stencilSize*2];
__shared__ myprec s_m[sPencils][my+stencilSize*2];
__shared__ myprec s_l[sPencils][my+stencilSize*2];
__shared__ myprec s_s3[sPencils][my+stencilSize*2];
__shared__ myprec s_s4[sPencils][my+stencilSize*2];
__shared__ myprec s_s5[sPencils][my+stencilSize*2];
__shared__ myprec s_dil[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
s_dil[sj][si] = dil[id.g];
s_s3[sj][si] = sij[3][id.g];
s_s4[sj][si] = sij[4][id.g];
s_s5[sj][si] = sij[5][id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
perBCy(s_r[sj],si); perBCy(s_u[sj],si);
perBCy(s_v[sj],si); perBCy(s_w[sj],si);
perBCy(s_h[sj],si); perBCy(s_t[sj],si);
perBCy(s_p[sj],si); perBCy(s_m[sj],si);
perBCy(s_l[sj],si);
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uYtmp = ( s_s3[sj][si] + sij[1][id.g] ) ;
vYtmp = ( 2 * s_s4[sj][si] - 2./3.*s_dil[sj][si] ) ;
wYtmp = ( s_s5[sj][si] + sij[7][id.g] ) ;
//adding the viscous dissipation part duidy*mu*siy
eYtmp = s_m[sj][si]*(uYtmp*s_s3[sj][si] + vYtmp*s_s4[sj][si] + wYtmp*s_s5[sj][si]);
//Adding here the terms d (mu) dy * siy;
derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uYtmp *= wrk2;
vYtmp *= wrk2;
wYtmp *= wrk2;
// viscous fluxes derivative mu*d^2dui dy^2
derDevSharedV2y(&wrk1,s_u[sj],si);
uYtmp = uYtmp + wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_v[sj],si);
vYtmp = vYtmp + wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_w[sj],si);
wYtmp = wYtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy)
eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp;
derDevSharedV2y(&wrk1,s_t[sj],si);
eYtmp = eYtmp + wrk1*s_l[sj][si];
derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eYtmp = eYtmp + wrk1*wrk2;
// split advection terms
//Adding here the terms - d (ru phi) dy;
fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si);
rYtmp = wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si);
uYtmp = uYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si);
vYtmp = vYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si);
wYtmp = wYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si);
eYtmp = eYtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
if (id.j < stencilSize) {
perBCy(s_dil[sj],si);
}
__syncthreads();
derDevSharedV1y(&wrk2,s_dil[sj],si);
derDevShared1y(&wrk1,s_p[sj],si);
vYtmp = vYtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eYtmp = eYtmp + s_m[sj][si]*wrk2/3.0*s_v[sj][si];
#if useStreams
rY[id.g] = rYtmp;
uY[id.g] = uYtmp;
vY[id.g] = vYtmp;
wY[id.g] = wYtmp;
eY[id.g] = eYtmp;
#else
rY[id.g] += rYtmp;
uY[id.g] += uYtmp;
vY[id.g] += vYtmp;
wY[id.g] += wYtmp;
eY[id.g] += eYtmp;
#endif
}
__global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil, myprec dpdz) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rZtmp=0;
myprec uZtmp=0;
myprec vZtmp=0;
myprec wZtmp=0;
myprec eZtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
__shared__ myprec s_t[sPencils][mz+stencilSize*2];
__shared__ myprec s_p[sPencils][mz+stencilSize*2];
__shared__ myprec s_m[sPencils][mz+stencilSize*2];
__shared__ myprec s_l[sPencils][mz+stencilSize*2];
__shared__ myprec s_s6[sPencils][mz+stencilSize*2];
__shared__ myprec s_s7[sPencils][mz+stencilSize*2];
__shared__ myprec s_s8[sPencils][mz+stencilSize*2];
__shared__ myprec s_dil[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
s_s6[sj][si] = sij[6][id.g];
s_s7[sj][si] = sij[7][id.g];
s_s8[sj][si] = sij[8][id.g];
s_dil[sj][si] = dil[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
perBCz(s_r[sj],si); perBCz(s_u[sj],si);
perBCz(s_v[sj],si); perBCz(s_w[sj],si);
perBCz(s_h[sj],si); perBCz(s_t[sj],si);
perBCz(s_p[sj],si); perBCz(s_m[sj],si);
perBCz(s_l[sj],si);
}
__syncthreads();
//initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms
uZtmp = ( s_s6[sj][si] + sij[2][id.g] );
vZtmp = ( s_s7[sj][si] + sij[5][id.g] );
wZtmp = (2 * s_s8[sj][si] - 2./3.*s_dil[sj][si] );
//adding the viscous dissipation part duidz*mu*siz
eZtmp = s_m[sj][si]*(uZtmp*s_s6[sj][si] + vZtmp*s_s7[sj][si] + wZtmp*s_s8[sj][si]);
//Adding here the terms d (mu) dz * szj;
derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz
uZtmp *= wrk2;
vZtmp *= wrk2;
wZtmp *= wrk2;
// viscous fluxes derivative
derDevSharedV2z(&wrk1,s_u[sj],si);
uZtmp = uZtmp + wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_v[sj],si);
vZtmp = vZtmp + wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_w[sj],si);
wZtmp = wZtmp + wrk1*s_m[sj][si];
//adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz)
derDevSharedV2z(&wrk1,s_t[sj],si);
eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp + wrk1*s_l[sj][si];
derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz
derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eZtmp = eZtmp + wrk1*wrk2;
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si);
rZtmp = wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si);
uZtmp = uZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si);
vZtmp = vZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si);
wZtmp = wZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si);
eZtmp = eZtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
__syncthreads();
if (id.k < stencilSize) {
perBCz(s_dil[sj],si);
}
__syncthreads();
derDevSharedV1z(&wrk2,s_dil[sj],si);
derDevShared1z(&wrk1,s_p[sj],si);
wZtmp = wZtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ;
eZtmp = eZtmp + s_m[sj][si]*wrk2/3.0*s_w[sj][si];
#if useStreams
rZ[id.g] = rZtmp;
uZ[id.g] = uZtmp;
vZ[id.g] = vZtmp;
wZ[id.g] = wZtmp + dpdz;
eZ[id.g] = eZtmp + dpdz*s_w[sj][si] ;
#else
rZ[id.g] += rZtmp;
uZ[id.g] += uZtmp;
vZ[id.g] += vZtmp;
wZ[id.g] += wZtmp + dpdz;
eZ[id.g] += eZtmp + dpdz*s_w[sj][si] ;
#endif
__syncthreads();
}
|
aef34a4dadbf5303ed2ca20667d0f9b801902743.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define isnan(x) ( x != x )
#ifndef INFINITY
#define INFINITY __int_as_float(0x7f800000)
#endif
#if (__CUDA_ARCH__ < 700)
__device__ void __nanosleep(unsigned int ns){
clock_t start_clock = clock();
clock_t clock_offset = 0;
while (clock_offset < ns)
{
clock_offset = clock() - start_clock;
}
}
#endif
__device__ void mutex_lock(
unsigned int *mutex
) {
unsigned int ns = 8;
unsigned int counter = 0;
__syncthreads();
if (threadIdx.x == 0 ){
while (atomicCAS(mutex, 0, 1) == 1) {
__nanosleep(ns);
counter ++;
if (counter > 1000) break;
if (ns < 256) {
ns *= 2;
}
}
}
__syncthreads();
}
__device__ void mutex_lock_noop(
) {
__syncthreads();
}
__device__ void mutex_unlock(
unsigned int *mutex
) {
__threadfence();
__syncthreads();
if (threadIdx.x == 0){
atomicExch(mutex, 0);
__threadfence();
}
__syncthreads();
}
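/* Illustrative sketch (not part of the original file): the intended usage pattern of the
   block-wide mutex helpers above. The slot layout and the body of the critical section
   are assumptions made only for this example. */
__device__ void mutex_usage_example(unsigned int* mutex, float* g_accum, int slot)
{
	mutex_lock(&mutex[slot]);      // the whole block acquires the lock; only threadIdx.x == 0 spins on the atomic
	if (threadIdx.x == 0) {
		g_accum[slot] += 1.0f;     // critical section touching shared global state
	}
	mutex_unlock(&mutex[slot]);    // __threadfence makes the update visible before the flag is cleared
}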
__device__ void mutex_unlock_noop(){
__syncthreads();
__syncthreads();
}
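/* bfe: returns the single bit of `source` at position `bitIndex` (0 or 1) via the PTX
   bfe.u32 (bit-field extract) instruction; the bitonic sort below uses it to read the
   individual bits of the lane index that decide each compare-exchange direction. */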
__device__ __forceinline__ unsigned int bfe(
unsigned int source,
unsigned int bitIndex
) {
unsigned int bit;
asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1));
return bit;
}
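/* warp_comparator: one compare-exchange step of the bitonic network performed entirely
   in registers; __shfl_xor_sync pairs each lane with lane (laneID ^ stride), and the
   lane keeps either its own (value, index) or the partner's, depending on `direction`. */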
__device__ __forceinline__ void warp_comparator(
float &value,
float &index,
const int stride,
const int direction
){
const float other_value = __shfl_xor_sync(0xFFFFFFFF, value, stride);
const float other_index = __shfl_xor_sync(0xFFFFFFFF, index, stride);
bool condition = value < other_value == direction;
index = condition ? other_index : index;
value = condition ? other_value : value;
}
__device__ __forceinline__ void block_comparator(
float &value,
float &index,
const int stride,
const int direction,
const int laneID,
_VOLATILE_ float valSmem[128+4],
_VOLATILE_ float idxSmem[128+4]
){
valSmem[laneID] = value;
idxSmem[laneID] = index;
__syncthreads();
float other_value = valSmem[laneID ^ stride];
float other_index = idxSmem[laneID ^ stride];
__syncthreads();
bool condition = value < other_value == direction;
index = condition ? other_index : index;
value = condition ? other_value : value;
}
__device__ void bitonic_sort_128(
float &value,
float &index,
_VOLATILE_ float valSmem[128+4],
_VOLATILE_ float idxSmem[128+4]
) {
unsigned int laneID = threadIdx.x % 128;
warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0));
warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0));
warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0));
warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0));
warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0));
block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0));
block_comparator(value, index, 64, bfe(laneID, 6), laneID, valSmem, idxSmem);
block_comparator(value, index, 32, bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 0));
}
__device__ void bitonic_sort_256(
float &value,
float &index,
float* g_values,
ll_t* g_indices,
float valSmem[128+4],
float idxSmem[128+4],
int Q, int adr, bool ok
){
int laneID = threadIdx.x % 128;
float other_index;
float other_value;
if (ok){
other_value = g_values[adr];
other_index = g_indices[adr];
} else {
other_value = -INFINITY;
other_index = 0;
}
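// Added note: the branch below swaps (value, index) with (other_value, other_index)
// when value <= other_value, using add/subtract in place of a temporary, so that
// `value` carries the larger candidate (and its index) into the bitonic merge below.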
bool condition = value > other_value == 0;
if (condition){
value = value + other_value;
index = index + other_index;
other_value = value - other_value;
other_index = index - other_index;
value = value - other_value;
index = index - other_index;
}
block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem);
block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, !bfe(laneID, 4));
warp_comparator(value, index, 8, !bfe(laneID, 3));
warp_comparator(value, index, 4, !bfe(laneID, 2));
warp_comparator(value, index, 2, !bfe(laneID, 1));
warp_comparator(value, index, 1, !bfe(laneID, 0));
/*
*/
if (ok){
g_values[adr] = value;
g_indices[adr] = index;
}
}
__device__ void bitonicSort256_noop()
{
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
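/* topk_dim_1: merges one 128x128 tile of freshly computed scores (cCache) into the
   running top-Q lists kept in global memory, one list per column iN (DIM == 1 appears
   to reduce over the M axis). Each column's 128 candidates are sorted with
   bitonic_sort_128 and then merged with the stored Q entries inside a per-column,
   mutex-protected bitonic_sort_256. */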
__device__ void topk_dim_1(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
unsigned int* mutex,
int gStartx, int gStarty, int bid,
int M, int N, int Q
){
int tid = threadIdx.x;
int vx = tid % 16;
int vy = tid / 16;
int hx = tid % 128;
int hy = tid / 128;
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + vx*8 + ni;
//if (iN < N) break;
// Store cCache to cSM
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
if (likely(iM < M && iN < N)){
valSmem[vx][vy*8 + mi] = cCache[mi].val[ni];
idxSmem[vx][vy*8 + mi] = iM;
} else {
valSmem[vx][vy*8 + mi] = -INFINITY;
idxSmem[vx][vy*8 + mi] = -1;
}
}
__syncthreads();
// Load from cSM to cCache
#pragma unroll
for (int i=0; i<8; i++){
float value = valSmem[hy*8 + i][hx];
float index = idxSmem[hy*8 + i][hx];
bitonic_sort_128(
value, index,
valSmem[hy*8 + i], idxSmem[hy*8 + i]
);
int iN = gStartx + (hy*8 + i)*8 + ni;
int adr = (bid)*N*Q + iN*Q + hx;
mutex_lock( &mutex[(bid)*N + iN] );
bitonic_sort_256(
value, index,
values, indices,
valSmem[hy*8+i], idxSmem[hy*8+i],
Q, adr, iN < N
);
mutex_unlock( &mutex[(bid)*N + iN] );
}
}
}
__device__ void topk_dim_2(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
unsigned int* mutex,
int gStartx, int gStarty, int bid,
int M, int N, int Q
){
int tid = threadIdx.x;
int vx = tid % 16;
int vy = tid / 16;
int hx = tid % 128;
int hy = tid / 128;
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
//if (iM >= M) break;
// Store cCache to cSM
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + vx*8 + ni;
if (likely(iN < N && iM < M)){
valSmem[vy][vx*8 + ni] = cCache[mi].val[ni];
idxSmem[vy][vx*8 + ni] = iN;
} else {
valSmem[vy][vx*8 + ni] = -INFINITY;
idxSmem[vy][vx*8 + ni] = -1;
}
}
__syncthreads();
// Load from cSM to cCache
#pragma unroll
for (int i=0; i<8; i++){
float value = valSmem[hy*8 + i][hx];
float index = idxSmem[hy*8 + i][hx];
bitonic_sort_128(
value, index,
valSmem[hy*8 + i], idxSmem[hy*8 + i]
);
int iM = gStarty + (hy*8 + i)*8 + mi;
int adr = (bid)*M*Q + iM*Q + hx;
mutex_lock( &mutex[(bid)*M + iM] );
bitonic_sort_256(
value, index,
values, indices,
valSmem[hy*8+i], idxSmem[hy*8+i],
Q, adr, iM < M
);
mutex_unlock( &mutex[(bid)*M + iM] );
}
}
}
extern "C"
__global__ void topk_bmm_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
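// Added worked example (values assumed for illustration): with _PN_ = _PM_ = 2 and
// M = N = 512, bDimX = bDimY = 2, so blockIdx.x in [0,4) selects the position inside
// a 2x2 block group (px = blockIdx.x % 2, py = blockIdx.x / 2) and blockIdx.y in [0,4)
// selects the group; e.g. blockIdx.y = 3, blockIdx.x = 1 gives
// bIdxX = (3 % 2)*2 + 1 = 3, bIdxY = (3 / 2)*2 + 0 = 2, i.e. gStartx = 384, gStarty = 256.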
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
} | aef34a4dadbf5303ed2ca20667d0f9b801902743.cu | #define isnan(x) ( x != x )
#ifndef INFINITY
#define INFINITY __int_as_float(0x7f800000)
#endif
#if (__CUDA_ARCH__ < 700)
__device__ void __nanosleep(unsigned int ns){
clock_t start_clock = clock();
clock_t clock_offset = 0;
while (clock_offset < ns)
{
clock_offset = clock() - start_clock;
}
}
#endif
__device__ void mutex_lock(
unsigned int *mutex
) {
unsigned int ns = 8;
unsigned int counter = 0;
__syncthreads();
if (threadIdx.x == 0 ){
while (atomicCAS(mutex, 0, 1) == 1) {
__nanosleep(ns);
counter ++;
if (counter > 1000) break;
if (ns < 256) {
ns *= 2;
}
}
}
__syncthreads();
}
__device__ void mutex_lock_noop(
) {
__syncthreads();
}
__device__ void mutex_unlock(
unsigned int *mutex
) {
__threadfence();
__syncthreads();
if (threadIdx.x == 0){
atomicExch(mutex, 0);
__threadfence();
}
__syncthreads();
}
__device__ void mutex_unlock_noop(){
__syncthreads();
__syncthreads();
}
__device__ __forceinline__ unsigned int bfe(
unsigned int source,
unsigned int bitIndex
) {
unsigned int bit;
asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1));
return bit;
}
__device__ __forceinline__ void warp_comparator(
float &value,
float &index,
const int stride,
const int direction
){
const float other_value = __shfl_xor_sync(0xFFFFFFFF, value, stride);
const float other_index = __shfl_xor_sync(0xFFFFFFFF, index, stride);
bool condition = value < other_value == direction;
index = condition ? other_index : index;
value = condition ? other_value : value;
}
__device__ __forceinline__ void block_comparator(
float &value,
float &index,
const int stride,
const int direction,
const int laneID,
_VOLATILE_ float valSmem[128+4],
_VOLATILE_ float idxSmem[128+4]
){
valSmem[laneID] = value;
idxSmem[laneID] = index;
__syncthreads();
float other_value = valSmem[laneID ^ stride];
float other_index = idxSmem[laneID ^ stride];
__syncthreads();
bool condition = value < other_value == direction;
index = condition ? other_index : index;
value = condition ? other_value : value;
}
__device__ void bitonic_sort_128(
float &value,
float &index,
_VOLATILE_ float valSmem[128+4],
_VOLATILE_ float idxSmem[128+4]
) {
unsigned int laneID = threadIdx.x % 128;
warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0));
warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0));
warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0));
warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0));
warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0));
block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0));
block_comparator(value, index, 64, bfe(laneID, 6), laneID, valSmem, idxSmem);
block_comparator(value, index, 32, bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, bfe(laneID, 4));
warp_comparator(value, index, 8, bfe(laneID, 3));
warp_comparator(value, index, 4, bfe(laneID, 2));
warp_comparator(value, index, 2, bfe(laneID, 1));
warp_comparator(value, index, 1, bfe(laneID, 0));
}
__device__ void bitonic_sort_256(
float &value,
float &index,
float* g_values,
ll_t* g_indices,
float valSmem[128+4],
float idxSmem[128+4],
int Q, int adr, bool ok
){
int laneID = threadIdx.x % 128;
float other_index;
float other_value;
if (ok){
other_value = g_values[adr];
other_index = g_indices[adr];
} else {
other_value = -INFINITY;
other_index = 0;
}
bool condition = value > other_value == 0;
if (condition){
value = value + other_value;
index = index + other_index;
other_value = value - other_value;
other_index = index - other_index;
value = value - other_value;
index = index - other_index;
}
block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem);
block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem);
warp_comparator(value, index, 16, !bfe(laneID, 4));
warp_comparator(value, index, 8, !bfe(laneID, 3));
warp_comparator(value, index, 4, !bfe(laneID, 2));
warp_comparator(value, index, 2, !bfe(laneID, 1));
warp_comparator(value, index, 1, !bfe(laneID, 0));
/*
*/
if (ok){
g_values[adr] = value;
g_indices[adr] = index;
}
}
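// bitonic_sort_256 merges the 128 freshly sorted register-resident candidates
// with the values already held in global memory (g_values/g_indices) and
// writes each thread's surviving element back. The add/subtract sequence in
// the `if (condition)` branch is a branch-free swap of (value, index) with
// (other_value, other_index); note that it relies on the intermediate sums
// not rounding.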
__device__ void bitonicSort256_noop()
{
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
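// bitonicSort256_noop issues the same number of __syncthreads() barriers (4)
// as one call to bitonic_sort_256, whose two block_comparator calls each
// synchronize twice; it presumably lets threads that have nothing to merge
// stay convergent with the rest of the block at those barriers.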
__device__ void topk_dim_1(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
unsigned int* mutex,
int gStartx, int gStarty, int bid,
int M, int N, int Q
){
int tid = threadIdx.x;
int vx = tid % 16;
int vy = tid / 16;
int hx = tid % 128;
int hy = tid / 128;
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + vx*8 + ni;
//if (iN < N) break;
// Store cCache to cSM
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
if (likely(iM < M && iN < N)){
valSmem[vx][vy*8 + mi] = cCache[mi].val[ni];
idxSmem[vx][vy*8 + mi] = iM;
} else {
valSmem[vx][vy*8 + mi] = -INFINITY;
idxSmem[vx][vy*8 + mi] = -1;
}
}
__syncthreads();
// Load from cSM to cCache
#pragma unroll
for (int i=0; i<8; i++){
float value = valSmem[hy*8 + i][hx];
float index = idxSmem[hy*8 + i][hx];
bitonic_sort_128(
value, index,
valSmem[hy*8 + i], idxSmem[hy*8 + i]
);
int iN = gStartx + (hy*8 + i)*8 + ni;
int adr = (bid)*N*Q + iN*Q + hx;
mutex_lock( &mutex[(bid)*N + iN] );
bitonic_sort_256(
value, index,
values, indices,
valSmem[hy*8+i], idxSmem[hy*8+i],
Q, adr, iN < N
);
mutex_unlock( &mutex[(bid)*N + iN] );
}
}
}
__device__ void topk_dim_2(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
unsigned int* mutex,
int gStartx, int gStarty, int bid,
int M, int N, int Q
){
int tid = threadIdx.x;
int vx = tid % 16;
int vy = tid / 16;
int hx = tid % 128;
int hy = tid / 128;
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
//if (iM >= M) break;
// Store cCache to cSM
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + vx*8 + ni;
if (likely(iN < N && iM < M)){
valSmem[vy][vx*8 + ni] = cCache[mi].val[ni];
idxSmem[vy][vx*8 + ni] = iN;
} else {
valSmem[vy][vx*8 + ni] = -INFINITY;
idxSmem[vy][vx*8 + ni] = -1;
}
}
__syncthreads();
// Load from cSM to cCache
#pragma unroll
for (int i=0; i<8; i++){
float value = valSmem[hy*8 + i][hx];
float index = idxSmem[hy*8 + i][hx];
bitonic_sort_128(
value, index,
valSmem[hy*8 + i], idxSmem[hy*8 + i]
);
int iM = gStarty + (hy*8 + i)*8 + mi;
int adr = (bid)*M*Q + iM*Q + hx;
mutex_lock( &mutex[(bid)*M + iM] );
bitonic_sort_256(
value, index,
values, indices,
valSmem[hy*8+i], idxSmem[hy*8+i],
Q, adr, iM < M
);
mutex_unlock( &mutex[(bid)*M + iM] );
}
}
}
extern "C"
__global__ void topk_bmm_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
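// Worked example (illustrative values): with _PN_ = 2, _PM_ = 2 and
// blockIdx.x = 3, px = 1 and py = 1, i.e. this block is the bottom-right
// member of its 2x2 group, while blockIdx.y selects the group inside the
// bDimX x bDimY grid of groups computed below.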
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads to make sure the tiles of A and B are fully
// loaded into shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads to make sure the tiles of A and B are fully
// loaded into shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads to make sure the tiles of A and B are fully
// loaded into shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
}
extern "C"
__global__ void topk_bmm_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* values,
ll_t* indices,
unsigned int* mutex,
int M, int N, int K, int DIM, int Q
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads to make sure the tiles of A and B are fully
// loaded into shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// TopK sort along DIM
if (DIM == 1){
topk_dim_1(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
} else if (DIM == 2){
topk_dim_2(
cCache, aSmem, bSmem,
values, indices, mutex,
gStartx, gStarty, bid, M, N, Q);
}
} |
7c9e3ba4e0fe09257447a29a97a107f46260ee70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
__global__ void normLogErrDeriv(int N, int M, float *A, float *Y, float *out)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = j*N + i;
int L = N*M;
if (i < N && j < M)
{
// A2 in this case is stored in the doubled rows of A; the length of A is
// double that of Y. out is the same length as A and will store both parts of the derivative
float a = __expf(__fmul_rn(2.0, A[index+L]));
float b = __fsub_rn(A[index], Y[index]);
out[index] = __fmul_rn(b, a);
out[index+L] = __fsub_rn(__fmul_rn(out[index], b), 1.0);
}
} | 7c9e3ba4e0fe09257447a29a97a107f46260ee70.cu | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
__global__ void normLogErrDeriv(int N, int M, float *A, float *Y, float *out)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = j*N + i;
int L = N*M;
if (i < N && j < M)
{
// A2 in this case is stored in the doubled rows of A; the length of A is
// double that of Y. out is the same length as A and will store both parts of the derivative
float a = __expf(__fmul_rn(2.0, A[index+L]));
float b = __fsub_rn(A[index], Y[index]);
out[index] = __fmul_rn(b, a);
out[index+L] = __fsub_rn(__fmul_rn(out[index], b), 1.0);
}
} |
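// --- Usage sketch (not part of the original file) ---
// A minimal host-side launch for normLogErrDeriv, assuming column-major N x M
// storage (matching index = j*N + i above), that d_A holds 2*N*M floats
// (A stacked with A2) while d_Y holds N*M, and a 16x16 thread block.
// The names d_A, d_Y, d_out and the block shape are illustrative assumptions.
/*
void launch_normLogErrDeriv(int N, int M, float *d_A, float *d_Y, float *d_out)
{
  dim3 block(16, 16);
  dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);
  normLogErrDeriv<<<grid, block>>>(N, M, d_A, d_Y, d_out);
  cudaDeviceSynchronize();
}
*/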
3834503ed5196dfe08fe98812e2a473f41c0163b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_layer_updater_schema.h"
#include "../neural_network_exception.h"
#include "../max_subsampling_layer.h"
#include "max_subsampling_layer_updater_hip.cuh"
#include "max_subsampling_layer_cudnn_updater_cuda.h"
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema()
{
}
max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema()
{
}
layer_updater_schema::ptr max_subsampling_layer_updater_schema::create_specific() const
{
return layer_updater_schema::ptr(new max_subsampling_layer_updater_schema());
}
std::string max_subsampling_layer_updater_schema::get_type_name() const
{
return max_subsampling_layer::layer_type_name;
}
layer_updater_cuda::ptr max_subsampling_layer_updater_schema::create_updater_specific(
const std::vector<layer_configuration_specific>& input_configuration_specific_list,
const layer_configuration_specific& output_configuration_specific) const
{
layer_updater_cuda::ptr res;
nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
if (layer_derived->tiling)
throw neural_network_exception("There is no CUDA updater for max subsampling layer with tiling");
switch (output_configuration_specific.dimension_sizes.size())
{
case 1:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<1>());
break;
case 2:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<2>());
//res = layer_updater_cuda::ptr(new max_subsampling_layer_cudnn_updater_cuda());
break;
case 3:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<3>());
break;
case 4:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<4>());
break;
default:
throw neural_network_exception((boost::format("No CUDA updater for the max subsampling layer of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str());
}
return res;
}
}
}
| 3834503ed5196dfe08fe98812e2a473f41c0163b.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_layer_updater_schema.h"
#include "../neural_network_exception.h"
#include "../max_subsampling_layer.h"
#include "max_subsampling_layer_updater_cuda.cuh"
#include "max_subsampling_layer_cudnn_updater_cuda.h"
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema()
{
}
max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema()
{
}
layer_updater_schema::ptr max_subsampling_layer_updater_schema::create_specific() const
{
return layer_updater_schema::ptr(new max_subsampling_layer_updater_schema());
}
std::string max_subsampling_layer_updater_schema::get_type_name() const
{
return max_subsampling_layer::layer_type_name;
}
layer_updater_cuda::ptr max_subsampling_layer_updater_schema::create_updater_specific(
const std::vector<layer_configuration_specific>& input_configuration_specific_list,
const layer_configuration_specific& output_configuration_specific) const
{
layer_updater_cuda::ptr res;
nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
if (layer_derived->tiling)
throw neural_network_exception("There is no CUDA updater for max subsampling layer with tiling");
switch (output_configuration_specific.dimension_sizes.size())
{
case 1:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<1>());
break;
case 2:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<2>());
//res = layer_updater_cuda::ptr(new max_subsampling_layer_cudnn_updater_cuda());
break;
case 3:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<3>());
break;
case 4:
res = layer_updater_cuda::ptr(new max_subsampling_layer_updater_cuda<4>());
break;
default:
throw neural_network_exception((boost::format("No CUDA updater for the max subsampling layer of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str());
}
return res;
}
}
}
|
983a8a6edbea2da6321409c846c66fa33eb86033.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* bashCGPU/CUDA
*
https://suzukiiichiro.github.io/search/?keyword=
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
//#define UINT64_C(c) c ## ULL
//
//
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//GPU
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}local;
// CPU /
void symmetryOps(unsigned int size,struct local* l)
{
/**
2. When the queen is not on the top-right corner:
(1) If rotating the board 90 degrees gives the same pattern as the original,
then rotating it 180 degrees or 270 degrees also gives the same pattern.
A unique solution of this kind belongs to a group with only 2 elements,
counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2. When the queen is not on the top-right corner:
(2) If the 90-degree rotation differs from the original, the 270-degree rotation
necessarily differs as well; the 180-degree rotation, however, may still coincide
with the original. When it does, the group of the unique solution has 4 elements
(left-right mirror x rotation).
*/
//
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2. When the queen is not on the top-right corner:
(3) If even the 180-degree rotation differs from the original, the group has
8 elements (left-right mirror x rotations x vertical flip).
*/
//
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// Q
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //Q
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// Q
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //Q
//
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
//
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //Q
//Q
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Q
//Q
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// Q
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //Q
symmetryOps(size,l); //
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// Q
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
//
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //Q
//Q
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Q
//Q
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// GPU
__host__ __device__
void GPU_symmetryOps(unsigned int size,struct local* l)
{
/**
2. When the queen is not on the top-right corner:
(1) If rotating the board 90 degrees gives the same pattern as the original,
then rotating it 180 degrees or 270 degrees also gives the same pattern.
A unique solution of this kind belongs to a group with only 2 elements,
counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2. When the queen is not on the top-right corner:
(2) If the 90-degree rotation differs from the original, the 270-degree rotation
necessarily differs as well; the 180-degree rotation, however, may still coincide
with the original. When it does, the group of the unique solution has 4 elements
(left-right mirror x rotation).
*/
//
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2. When the queen is not on the top-right corner:
(3) If even the 180-degree rotation differs from the original, the group has
8 elements (left-right mirror x rotations x vertical flip).
*/
//
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// GPU Q
__host__ __device__
void GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //Q
GPU_symmetryOps(size,l); //
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// GPU Q
__host__ __device__
void GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// GPU
__host__ __device__
long GPU_symmetry_solve_nodeLayer(unsigned int size,unsigned long left,unsigned long down,unsigned long right)
{
long mask=(1<<size)-1;
long counter=0;
if (down==mask) { // down
return 1;
}
long bit=0;
for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
bit=-bitmap&bitmap;
counter += GPU_symmetry_solve_nodeLayer(size,(left|bit)>>1,(down|bit),(right|bit)<< 1);
}
return counter;
}
// GPU
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //Q
//Q
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Q
//Q
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
l->UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
l->TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// i i
__global__
void dim_nodeLayer(unsigned int size,long* nodes,long* solutions,unsigned int numElements)
{
int i=blockDim.x * blockIdx.x+threadIdx.x;
if(i<numElements){
solutions[i]=GPU_symmetry_solve_nodeLayer(size,nodes[3 * i],nodes[3 * i + 1],nodes[3 * i + 2]);
}
}
// 0bit
int countBits_nodeLayer(long n)
{
int counter=0;
while (n){
n &= (n-1); //
counter++;
}
return counter;
}
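// This is Kernighan's popcount trick: n &= (n-1) clears the lowest set bit on
// every iteration, so e.g. countBits_nodeLayer(0b10110) == 3.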
// k
long kLayer_nodeLayer(int size,std::vector<long>& nodes,int k,long left,long down,long right)
{
long counter=0;
long mask=(1<<size)-1;
// down
if (countBits_nodeLayer(down) == k) {
nodes.push_back(left);
nodes.push_back(down);
nodes.push_back(right);
return 1;
}
long bit=0;
for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
bit=-bitmap&bitmap;
//
counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1);
}
return counter;
}
// k
std::vector<long> kLayer_nodeLayer(int size,int k)
{
std::vector<long> nodes{};
kLayer_nodeLayer(size,nodes,k,0,0,0);
return nodes;
}
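// kLayer_nodeLayer(size, k) enumerates every partial placement with exactly k
// queens and returns it as a flat vector of (left, down, right) triples
// (nodes.size() == 3 * node count); each triple is enough to resume the
// search on the device without storing the board itself.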
//
void symmetry_build_nodeLayer(int size)
{
// Vector of the nodes in the third layer of the tree
// (each node is encoded as three consecutive numbers).
// From layer 2 onward the nodes are evenly distributed, so symmetry can be exploited.
// Layer 4 has enough nodes (9,844 for N=16).
/* local l[MAX]; // local struct */
//std::vector<long> nodes=kLayer_nodeLayer(size,4,&l[0]);
std::vector<long> nodes=kLayer_nodeLayer(size,4);
//
//
size_t nodeSize=nodes.size() * sizeof(long);
long* hostNodes=(long*)malloc(nodeSize);
hostNodes=&nodes[0];
long* deviceNodes=NULL;
hipMalloc((void**)&deviceNodes,nodeSize);
hipMemcpy(deviceNodes,hostNodes,nodeSize,hipMemcpyHostToDevice);
//
long* deviceSolutions=NULL;
// Only half of the nodes are needed, and each node is encoded as 3 integers.
int numSolutions=nodes.size() / 6;
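// The device therefore solves only the first nodes.size()/6 sub-trees, i.e.
// half of the layer-4 nodes; the factor of 2 applied when summing the partial
// results below presumably restores the counts of the mirrored half.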
size_t solutionSize=numSolutions * sizeof(long);
hipMalloc((void**)&deviceSolutions,solutionSize);
// CUDA
int threadsPerBlock=256;
int blocksPerGrid=(numSolutions + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( dim_nodeLayer) , dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, size,deviceNodes,deviceSolutions,numSolutions);
//
long* hostSolutions=(long*)malloc(solutionSize);
hipMemcpy(hostSolutions,deviceSolutions,solutionSize,hipMemcpyDeviceToHost);
//
long solutions=0;
for(long i=0;i<numSolutions;i++){
solutions += 2*hostSolutions[i]; // Symmetry
}
//
TOTAL=solutions;
}
// CUDA
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
struct hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false;
int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;}
else{ gpuNodeLayer=true; } //gpu
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPU \n");
printf(" -c: CPU \n");
printf(" -g: GPU \n");
printf(" -n: GPU \n");
}
if(cpur){ printf("\n\nSymmetry elimination, recursive \n"); }
else if(cpu){ printf("\n\nSymmetry elimination, non-recursive \n"); }
else if(gpu){ printf("\n\nSymmetry elimination, GPU\n"); }
else if(gpuNodeLayer){ printf("\n\nSymmetry elimination, GPU node layer \n"); }
if(cpu||cpur)
{
int min=4;
int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//
if(cpur){ //
symmetry_R(size,&l);
}
if(cpu){ //
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//
int ss;int ms;int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuNodeLayer)
{
if(!InitCUDA()){return 0;}
/* int steps=24576; */
int min=4;
int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL); //
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuNodeLayer){
TOTAL=UNIQUE=0;
symmetry_build_nodeLayer(size); //
}
gettimeofday(&t1,NULL); //
int ss;int ms;int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}//end for
}//end if
return 0;
}
| 983a8a6edbea2da6321409c846c66fa33eb86033.cu | /**
*
* GPU/CUDA port of the C-language version of the bash symmetry-elimination N-Queens solver
*
For a detailed explanation (in Japanese) see
https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// システムによって以下のマクロが必要であればコメントを外してください。
//#define UINT64_C(c) c ## ULL
//
// グローバル変数
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//GPU で使うローカル構造体
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}local;
// CPU 再帰/非再帰 対称解除法
void symmetryOps(unsigned int size,struct local* l)
{
/**
2. When the queen is not on the top-right corner:
(1) If rotating the board 90 degrees gives the same pattern as the original,
then rotating it 180 degrees or 270 degrees also gives the same pattern.
A unique solution of this kind belongs to a group with only 2 elements,
counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2. When the queen is not on the top-right corner:
(2) If the 90-degree rotation differs from the original, the 270-degree rotation
necessarily differs as well; the 180-degree rotation, however, may still coincide
with the original. When it does, the group of the unique solution has 4 elements
(left-right mirror x rotation).
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2. When the queen is not on the top-right corner:
(3) If even the 180-degree rotation differs from the original, the group has
8 elements (left-right mirror x rotations x vertical flip).
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// 非再帰 角にQがないときのバックトラック
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //上部サイド枝刈り
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //下部サイド枝刈り
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //Qを配置
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //対称解除法
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// 非再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //Qを配置
//クイーンが配置可能な位置を表す
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// 非再帰 対称解除法
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// 再帰 角にQがないときのバックトラック
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// 再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// 再帰 対称解除法
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// GPU 対称解除法
__host__ __device__
void GPU_symmetryOps(unsigned int size,struct local* l)
{
/**
2. When the queen is not on the top-right corner:
(1) If rotating the board 90 degrees gives the same pattern as the original,
then rotating it 180 degrees or 270 degrees also gives the same pattern.
A unique solution of this kind belongs to a group with only 2 elements,
counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2. When the queen is not on the top-right corner:
(2) If the 90-degree rotation differs from the original, the 270-degree rotation
necessarily differs as well; the 180-degree rotation, however, may still coincide
with the original. When it does, the group of the unique solution has 4 elements
(left-right mirror x rotation).
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2. When the queen is not on the top-right corner:
(3) If even the 180-degree rotation differs from the original, the group has
8 elements (left-right mirror x rotations x vertical flip).
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// GPU 角にQがないときのバックトラック
__host__ __device__
void GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
GPU_symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// GPU 角にQがあるときのバックトラック
__host__ __device__
void GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// GPU クイーンの効きを判定して解を返す
__host__ __device__
long GPU_symmetry_solve_nodeLayer(unsigned int size,unsigned long left,unsigned long down,unsigned long right)
{
long mask=(1<<size)-1;
long counter=0;
if (down==mask) { // downがすべて専有され解が見つかる
return 1;
}
long bit=0;
for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
bit=-bitmap&bitmap;
counter += GPU_symmetry_solve_nodeLayer(size,(left|bit)>>1,(down|bit),(right|bit)<< 1);
}
return counter;
}
// GPU 対称解除法
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
l->UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
l->TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// ノードレイヤー i 番目のメンバを i 番目の部分木の解で埋める
__global__
void dim_nodeLayer(unsigned int size,long* nodes,long* solutions,unsigned int numElements)
{
int i=blockDim.x * blockIdx.x+threadIdx.x;
if(i<numElements){
solutions[i]=GPU_symmetry_solve_nodeLayer(size,nodes[3 * i],nodes[3 * i + 1],nodes[3 * i + 2]);
}
}
// ノードレイヤー 0以外のbitをカウント
int countBits_nodeLayer(long n)
{
int counter=0;
while (n){
n &= (n-1); // 右端のゼロ以外の数字を削除
counter++;
}
return counter;
}
// ノードレイヤー ノードをk番目のレイヤーのノードで埋める
long kLayer_nodeLayer(int size,std::vector<long>& nodes,int k,long left,long down,long right)
{
long counter=0;
long mask=(1<<size)-1;
// すべてのdownが埋まったら、解決策を見つけたことになる。
if (countBits_nodeLayer(down) == k) {
nodes.push_back(left);
nodes.push_back(down);
nodes.push_back(right);
return 1;
}
long bit=0;
for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
bit=-bitmap&bitmap;
// 解を加えて対角線をずらす
counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1);
}
return counter;
}
// ノードレイヤー k 番目のレイヤのすべてのノードを含むベクトルを返す
std::vector<long> kLayer_nodeLayer(int size,int k)
{
std::vector<long> nodes{};
kLayer_nodeLayer(size,nodes,k,0,0,0);
return nodes;
}
// ノードレイヤーの作成
void symmetry_build_nodeLayer(int size)
{
// Vector of the nodes in the third layer of the tree
// (each node is encoded as three consecutive numbers).
// From layer 2 onward the nodes are evenly distributed, so symmetry can be exploited.
// Layer 4 has enough nodes (9,844 for N=16).
/* local l[MAX]; // local struct */
//std::vector<long> nodes=kLayer_nodeLayer(size,4,&l[0]);
std::vector<long> nodes=kLayer_nodeLayer(size,4);
// デバイスにはクラスがないので、
// 最初の要素を指定してからデバイスにコピーする。
size_t nodeSize=nodes.size() * sizeof(long);
long* hostNodes=(long*)malloc(nodeSize);
hostNodes=&nodes[0];
long* deviceNodes=NULL;
cudaMalloc((void**)&deviceNodes,nodeSize);
cudaMemcpy(deviceNodes,hostNodes,nodeSize,cudaMemcpyHostToDevice);
// デバイス出力の割り当て
long* deviceSolutions=NULL;
// Only half of the nodes are needed, and each node is encoded as 3 integers.
int numSolutions=nodes.size() / 6;
size_t solutionSize=numSolutions * sizeof(long);
cudaMalloc((void**)&deviceSolutions,solutionSize);
// CUDAカーネルを起動する。
int threadsPerBlock=256;
int blocksPerGrid=(numSolutions + threadsPerBlock - 1) / threadsPerBlock;
dim_nodeLayer <<<blocksPerGrid,threadsPerBlock >>> (size,deviceNodes,deviceSolutions,numSolutions);
// 結果をホストにコピー
long* hostSolutions=(long*)malloc(solutionSize);
cudaMemcpy(hostSolutions,deviceSolutions,solutionSize,cudaMemcpyDeviceToHost);
// 部分解を加算し、結果を表示する。
long solutions=0;
for(long i=0;i<numSolutions;i++){
solutions += 2*hostSolutions[i]; // Symmetry
}
// 出力
TOTAL=solutions;
}
// CUDA 初期化
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
int i;
for(i=0;i<count;i++){
struct cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//メイン
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false;
int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;}
else{ gpuNodeLayer=true; } //デフォルトをgpuとする
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
printf(" -r: CPU 再帰\n");
printf(" -c: CPU 非再帰\n");
printf(" -g: GPU 再帰\n");
printf(" -n: GPU ノードレイヤー\n");
}
if(cpur){ printf("\n\n対称解除法 再帰 \n"); }
else if(cpu){ printf("\n\n対称解除法 非再帰 \n"); }
else if(gpu){ printf("\n\n対称解除法 GPU\n"); }
else if(gpuNodeLayer){ printf("\n\n対称解除法 GPUノードレイヤー \n"); }
if(cpu||cpur)
{
int min=4;
int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//計測開始
if(cpur){ //再帰
symmetry_R(size,&l);
}
if(cpu){ //非再帰
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//計測終了
int ss;int ms;int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuNodeLayer)
{
if(!InitCUDA()){return 0;}
/* int steps=24576; */
int min=4;
int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL); // 計測開始
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuNodeLayer){
TOTAL=UNIQUE=0;
symmetry_build_nodeLayer(size); // 対称解除法
}
gettimeofday(&t1,NULL); // 計測終了
int ss;int ms;int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}//end for
}//end if
return 0;
}
|
5d564be92e273ef97cca0c53943534f301ed61ed.hip | // !!! This is a file automatically generated by hipify!!!
//==============================================================
// Copyright 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <chrono>
#include <vector>
#include <hip/hip_runtime.h>
#include "Projectile.hpp"
#ifdef DEBUG
static const int num_elements = 100;
#else
static const int num_elements = 10000000;
#endif
const float kPIValue = 3.1415;
const float kGValue = 9.81;
const int BLOCK_SIZE = 256;
// Function to calculate the range, maximum height and total flight time of a
// projectile
__global__ void CalculateRange(const Projectile *obj, Projectile *pObj) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i >= num_elements) return;
float proj_angle = obj[i].getangle();
float proj_vel = obj[i].getvelocity();
float sin_value = sin(proj_angle * kPIValue / 180.0f);
float cos_value = cos(proj_angle * kPIValue / 180.0f);
float total_time = fabs((2 * proj_vel * sin_value)) / kGValue;
float max_range = fabs(proj_vel * total_time * cos_value);
float max_height = (proj_vel * proj_vel * sin_value * sin_value) /
(2.0f * kGValue); // h = v^2 * sin^2(theta) / (2g)
pObj[i].setRangeandTime(max_range, total_time, proj_angle, proj_vel, max_height);
}
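// The kernel uses the standard drag-free projectile formulas: time of flight
// t = 2*v*sin(theta)/g, range R = v*cos(theta)*t and maximum height
// h = v^2*sin^2(theta)/(2*g); angles arrive in degrees and are converted with
// kPIValue/180.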
// in_vect and out_vect are the vectors with N Projectile numbers and are inputs to the
// parallel function
void GpuParallel(std::vector<Projectile>& in_vect,
std::vector<Projectile>& out_vect,
const int repeat)
{
Projectile *bufin_vect, *bufout_vect;
hipMalloc((void**)&bufin_vect, sizeof(Projectile) * num_elements);
hipMalloc((void**)&bufout_vect, sizeof(Projectile) * num_elements);
hipMemcpy(bufin_vect, in_vect.data(), sizeof(Projectile) * num_elements, hipMemcpyHostToDevice);
dim3 grids ((num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 blocks (BLOCK_SIZE);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( CalculateRange) , dim3(grids), dim3(blocks) , 0, 0, bufin_vect, bufout_vect);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(out_vect.data(), bufout_vect, sizeof(Projectile) * num_elements, hipMemcpyDeviceToHost);
hipFree(bufin_vect);
hipFree(bufout_vect);
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
float init_angle = 0.0f;
float init_vel = 0.0f;
vector<Projectile> input_vect1, out_parallel_vect2, out_scalar_vect3;
// Initialize the Input and Output vectors
srand(2);
for (int i = 0; i < num_elements; i++) {
init_angle = rand() % 90 + 10;
init_vel = rand() % 400 + 10;
input_vect1.push_back(Projectile(init_angle, init_vel, 1.0f, 1.0f, 1.0f));
out_parallel_vect2.push_back(Projectile());
out_scalar_vect3.push_back(Projectile());
}
// Call GpuParallel with the required inputs and outputs
GpuParallel(input_vect1, out_parallel_vect2, repeat);
#ifdef DEBUG
for (int i = 0; i < num_elements; i++)
{
// Displaying the Parallel computation results.
cout << "Parallel " << out_parallel_vect2[i];
}
#endif
return 0;
}
| 5d564be92e273ef97cca0c53943534f301ed61ed.cu | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda.h>
#include "Projectile.hpp"
#ifdef DEBUG
static const int num_elements = 100;
#else
static const int num_elements = 10000000;
#endif
const float kPIValue = 3.1415;
const float kGValue = 9.81;
const int BLOCK_SIZE = 256;
// Function to calculate the range, maximum height and total flight time of a
// projectile
__global__ void CalculateRange(const Projectile *obj, Projectile *pObj) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i >= num_elements) return;
float proj_angle = obj[i].getangle();
float proj_vel = obj[i].getvelocity();
float sin_value = sin(proj_angle * kPIValue / 180.0f);
float cos_value = cos(proj_angle * kPIValue / 180.0f);
float total_time = fabs((2 * proj_vel * sin_value)) / kGValue;
float max_range = fabs(proj_vel * total_time * cos_value);
  float max_height = (proj_vel * proj_vel * sin_value * sin_value) /
                     (2.0f * kGValue); // h = v^2 * sin^2(theta) / (2g)
pObj[i].setRangeandTime(max_range, total_time, proj_angle, proj_vel, max_height);
}
// in_vect and out_vect are the vectors with N Projectile numbers and are inputs to the
// parallel function
void GpuParallel(std::vector<Projectile>& in_vect,
std::vector<Projectile>& out_vect,
const int repeat)
{
Projectile *bufin_vect, *bufout_vect;
cudaMalloc((void**)&bufin_vect, sizeof(Projectile) * num_elements);
cudaMalloc((void**)&bufout_vect, sizeof(Projectile) * num_elements);
cudaMemcpy(bufin_vect, in_vect.data(), sizeof(Projectile) * num_elements, cudaMemcpyHostToDevice);
dim3 grids ((num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 blocks (BLOCK_SIZE);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
CalculateRange <<< grids, blocks >>> (bufin_vect, bufout_vect);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(out_vect.data(), bufout_vect, sizeof(Projectile) * num_elements, cudaMemcpyDeviceToHost);
cudaFree(bufin_vect);
cudaFree(bufout_vect);
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
float init_angle = 0.0f;
float init_vel = 0.0f;
vector<Projectile> input_vect1, out_parallel_vect2, out_scalar_vect3;
// Initialize the Input and Output vectors
srand(2);
for (int i = 0; i < num_elements; i++) {
init_angle = rand() % 90 + 10;
init_vel = rand() % 400 + 10;
input_vect1.push_back(Projectile(init_angle, init_vel, 1.0f, 1.0f, 1.0f));
out_parallel_vect2.push_back(Projectile());
out_scalar_vect3.push_back(Projectile());
}
  // Call GpuParallel with the required inputs and outputs
GpuParallel(input_vect1, out_parallel_vect2, repeat);
#ifdef DEBUG
for (int i = 0; i < num_elements; i++)
{
// Displaying the Parallel computation results.
cout << "Parallel " << out_parallel_vect2[i];
}
#endif
return 0;
}
|
6286af70912d47e01803dbbf0976564f4da3b401.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (reduce) {
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
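    // Fused transform-reduce: mse_functor maps each (input, target) pair to its
    // squared difference and thrust::plus accumulates the sum.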
accreal sum = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, (accreal) 0,
thrust::plus<accreal>(), mse_functor<real, accreal>());
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
return;
}
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<real, real, real>(
state,
input,
target,
output,
mse_updateOutput_functor<real>());
}
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
if (reduce) {
ptrdiff_t size = THCTensor_(nElement)(state, input);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
accreal norm = sizeAverage ? (accreal)(2)/size : (accreal)(2);
norm *= ScalarConvert<real, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(norm));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
return;
}
THCUNN_check_shape(state, input, gradOutput);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradOutput_data(THCTensor_(data)(state, gradOutput));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(2));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
thrust::multiplies<real>());
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(free)(state, gradOutput);
}
#endif
| 6286af70912d47e01803dbbf0976564f4da3b401.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (reduce) {
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, (accreal) 0,
thrust::plus<accreal>(), mse_functor<real, accreal>());
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
return;
}
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<real, real, real>(
state,
input,
target,
output,
mse_updateOutput_functor<real>());
}
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
if (reduce) {
ptrdiff_t size = THCTensor_(nElement)(state, input);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
accreal norm = sizeAverage ? (accreal)(2)/size : (accreal)(2);
norm *= ScalarConvert<real, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(norm));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
return;
}
THCUNN_check_shape(state, input, gradOutput);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradOutput_data(THCTensor_(data)(state, gradOutput));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(2));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
thrust::multiplies<real>());
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(free)(state, gradOutput);
}
#endif
|
78bae23958127e18442cdf4cecf69cff2e77bb03.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <vector>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <sys/time.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////////////
// All cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// Read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// Color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// Include parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// Write to global memory: As an optimization, this code uses a float4
// store, which results in more efficient code than if it were coded as
// four separate float stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// Write to global memory: As an optimization, this code uses a float4
// store, which results in more efficient code than if it were coded as
// four separate float stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update positions of fireworks
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = M_PI;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// Determine the firework center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// Update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// Firework sparks
float sx = position[index3j];
float sy = position[index3j+1];
// Compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// Compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// Random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// Travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
    // Place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step. Update circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// Load from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// Hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// Add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// Drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// Update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// Update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// If the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// Restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// Store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Given a pixel and a circle, determine the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// Circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// There is a non-zero contribution. Now compute the shading value
// Suggestion: This conditional is in the inner loop. Although it
// will evaluate the same for all threads, there is overhead in
// setting up the lane masks, etc., to implement the conditional. It
// would be wise to perform this logic outside of the loops in
// kernelRenderCircles. (If feeling good about yourself, you
// could use some specialized template magic).
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// Global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
// kernelGetCirBinsCount -- (CUDA device code)
//
// Each thread counts how many bins its circle's bounding box overlaps.
__global__ void kernelGetCirBinsCount(uint* count, uint binPixLength) {
int circleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (circleIdx >= cuConstRendererParams.numCircles)
return;
int circleIdx3 = circleIdx * 3;
float3 cen = *(float3*) (&cuConstRendererParams.position[circleIdx3]);
float rad = cuConstRendererParams.radius[circleIdx];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
short minX = fminf(fmaxf((cen.x - rad) * imgWidth , 0), imgWidth-1);
short maxX = fminf(fmaxf((cen.x + rad) * imgWidth , 0), imgWidth-1);
short minY = fminf(fmaxf((cen.y - rad) * imgHeight, 0), imgHeight-1);
short maxY = fminf(fmaxf((cen.y + rad) * imgHeight, 0), imgHeight-1);
short xbinStart = minX / binPixLength;
short xbinEnd = (maxX / binPixLength) + 1;
short ybinStart = minY / binPixLength;
short ybinEnd = (maxY / binPixLength) + 1;
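    // Number of grid bins overlapped by this circle's bounding box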
count[circleIdx] = static_cast<uint>((xbinEnd - xbinStart) * (ybinEnd - ybinStart));
}
// kernelGetCirBinsPair -- (CUDA device code)
//
// Each thread corresponds to a circle, but it fills two arrays covering all
// (bin, circle) pairs: one holds the bin index on the image grid and the other
// the circle index. Seems like repetitive logic, but we needed to know the
// length beforehand!
__global__ void kernelGetCirBinsPair(uint* devInputCirIdxStart,
uint* devOutputBinsCir_Bin,
uint* devOutputBinsCir_Cir,
uint binNumLength,
uint binPixLength) {
int circleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (circleIdx >= cuConstRendererParams.numCircles)
return;
int circleIdx3 = circleIdx * 3;
float3 cen = *(float3*) (&cuConstRendererParams.position[circleIdx3]);
float rad = cuConstRendererParams.radius[circleIdx];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
short minX = fminf(fmaxf((cen.x-rad) * imgWidth, 0), imgWidth-1);
short maxX = fminf(fmaxf((cen.x+rad) * imgWidth, 0), imgWidth-1);
short minY = fminf(fmaxf((cen.y-rad) * imgHeight, 0), imgHeight-1);
short maxY = fminf(fmaxf((cen.y+rad) * imgHeight, 0), imgHeight-1);
short xbinStart = minX / binPixLength;
short xbinEnd = (maxX / binPixLength) + 1;
short ybinStart = minY / binPixLength;
short ybinEnd = (maxY / binPixLength) + 1;
uint ind = devInputCirIdxStart[circleIdx];
//Row-major order!
for (uint y = ybinStart; y < ybinEnd; y++) {
uint binOffset = y * binNumLength;
for (uint x = xbinStart; x < xbinEnd; x++) {
devOutputBinsCir_Bin[ind] = binOffset + x;
devOutputBinsCir_Cir[ind] = circleIdx;
ind++;
}
}
}
// kernelGetBinStartIndex -- (CUDA device code)
//
// For each bin, we want to know its starting index in the sorted pair array.
// We can set up shared memory across threads and check if the previous index
// is different from the current index.
__global__ void kernelGetBinStartIndex(uint* devOutputBinStartIndex,
uint* devInputCirBins_Bin,
uint binsCirLength) {
__shared__ int cache[257]; //blockDim.x + 1
int binsCirIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (binsCirIdx >= binsCirLength)
return;
if (threadIdx.x == 0) {
// Do most common case first for if-else
if (binsCirIdx != 0) {
cache[0] = devInputCirBins_Bin[binsCirIdx-1];
} else {
cache[0] = 0;
}
}
cache[1+threadIdx.x] = devInputCirBins_Bin[binsCirIdx];
// ------------------ //
__syncthreads(); //
// ------------------ //
int index = cache[1+threadIdx.x];
bool newBin = (index != cache[threadIdx.x]);
if (newBin) {
// printf("New bin at: %d, %u\n", index, binsCirIdx);
devOutputBinStartIndex[index] = binsCirIdx;
}
// ------------------ //
__syncthreads(); //
// ------------------ //
if (binsCirIdx == binsCirLength - 1) {
newBin = true;
binsCirIdx = (int) binsCirLength;
}
if (newBin) {
int j = index;
while (j > 0 && devOutputBinStartIndex[j] == 0) {
devOutputBinStartIndex[j] = (uint) binsCirIdx;
j--;
}
}
}
// kernelGetBinSizes -- (CUDA device code)
//
// Find the size of each bin (how many circles are inside), which is done with
// pairwise subtraction on the starting indices.
__global__ void kernelGetBinSizes(uint* devOutputBinSizes,
uint* devInputBinStartIndex,
uint binsTotal,
uint binsCirLength) {
__shared__ int cache[257]; //blockDim.x + 1
int binsCirIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (binsCirIdx >= binsTotal)
return;
if (threadIdx.x == blockDim.x - 1) {
// Do most common case first for if-else
if (binsCirIdx != binsTotal - 1) {
cache[threadIdx.x+1] = devInputBinStartIndex[binsCirIdx+1];
}
}
if (binsCirIdx == binsTotal - 1) {
cache[1+threadIdx.x] = binsCirLength;
}
cache[threadIdx.x] = devInputBinStartIndex[binsCirIdx];
__syncthreads();
devOutputBinSizes[binsCirIdx] = cache[1+threadIdx.x] - cache[threadIdx.x];
}
// kernelRenderCirclesTRUE -- (CUDA device code)
//
// My implementation of rendering circles properly. Each thread owns one pixel
// inside a block of pixels; rendering one circle per thread instead would give
// no guarantee that circles are composited in the correct order.
//
__global__ void kernelRenderCirclesTRUE(uint* devCirBins_Cir,
uint* devBinStartIndex,
uint binNumLength,
uint binPixLength,
uint* devBinNumCir,
uint maxBinNumCir, bool conditional,
bool share) {
//extern keyword since dynamically allocated!
extern __shared__ float cache[];
float *shareCen = cache;
float *shareRad = cache + maxBinNumCir * 3;
float *shareCol = shareRad + maxBinNumCir;
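    // Shared-memory layout per circle in this bin: 3 floats for the center,
    // 1 for the radius, 3 for the color (7 floats total, matching the
    // sharedMemSize computed at kernel launch).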
//Find bin from pixel coordinate
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short pixX = blockDim.x * blockIdx.x + threadIdx.x;
short pixY = blockDim.y * blockIdx.y + threadIdx.y;
short binX = pixX / binPixLength;
short binY = pixY / binPixLength;
short binInd = binY * binNumLength + binX;
int binStart = devBinStartIndex[binInd];
int binSize = devBinNumCir[binInd];
short tCount = blockDim.x * blockDim.y;
short threadId = threadIdx.y * blockDim.x + threadIdx.x;
//Move radius and center to shared data
if (share)
for (int i = threadId; i < binSize; i += tCount) {
uint cirIndex = devCirBins_Cir[binStart + i];
shareRad[i] = cuConstRendererParams.radius[cirIndex];
*(float3*)(&shareCen[i * 3]) = *(float3*)(&cuConstRendererParams.position[cirIndex * 3]);
*(float3*)(&shareCol[i * 3]) = *(float3*)(&cuConstRendererParams.color[cirIndex * 3]);
}
    //We do this now after moving stuff to shared data. Otherwise results are weird!
if (pixX >= imageWidth || pixY >= imageHeight) {
return;
}
__syncthreads();
//Move lots of logic from shadePixel into here
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
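    // Composite every circle assigned to this bin; the stable sort kept them in
    // ascending circle order, so the blend order matches the sequential renderer.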
for (int i = 0; i < binSize; i++) {
uint cirIndex = devCirBins_Cir[binStart + i];
float3 cen = (share) ? *(float3*) (&shareCen[i*3]) :
*(float3*) (&cuConstRendererParams.position[cirIndex * 3]);
float rad = (share) ? shareRad[i] : cuConstRendererParams.radius[cirIndex];
float maxDist = rad * rad;
float diffX = pcen.x - cen.x;
float diffY = pcen.y - cen.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist > maxDist) {
continue;
}
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-cen.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = (share) ? *(float3*) (&shareCol[i * 3]) :
*(float3*) (&cuConstRendererParams.color[cirIndex * 3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
//draw into to the pixel shared buffer
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
}
*imagePtr = newColor;
}
// kernelDoesIntersectCircle -- (CUDA device code)
//
__global__ void kernelDoesIntersectCircle (int **dev_result, short **box,
int numCir, int numPix) {
printf(":D");
int indexX = blockDim.x * blockIdx.x + threadIdx.x; //Circle idx
    int indexY = blockDim.y * blockIdx.y + threadIdx.y; //Pixel idx
if (indexX >= numCir || indexY >= numPix)
return;
short imageWidth = cuConstRendererParams.imageWidth;
int pixX = indexY % imageWidth;
int pixY = indexY / imageWidth;
short *bbox = box[indexX];
if (pixX >= bbox[0] && pixX <= bbox[1] && pixY >= bbox[2] && pixY <= bbox[3]){
float rad = cuConstRendererParams.radius[indexX];
float3 p = *(float3*) (&cuConstRendererParams.position[indexX]);
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
float maxDist = rad * rad;
float diffX = pcen.x - p.x;
float diffY = pcen.y - p.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist <= maxDist) {
dev_result[indexY][indexX] = 1;
return;
}
}
dev_result[indexY][indexX] = 0;
}
// kernelRenderCirclesBoxes-- (CUDA device code)
//
// Gets the bounding boxes for all circles
__global__ void kernelRenderCirclesBoxes (int numCir, short** box) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
float3 cen = *(float3*) (&cuConstRendererParams.position[3*index]);
float rad = cuConstRendererParams.radius[index];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
box[index][0] = fminf(fmaxf((cen.x-rad) * imgWidth, 0), imgWidth-1);
box[index][1] = fminf(fmaxf((cen.x+rad) * imgWidth, 0), imgWidth-1);
box[index][2] = fminf(fmaxf((cen.y-rad) * imgHeight, 0), imgHeight-1);
box[index][3] = fminf(fmaxf((cen.y+rad) * imgHeight, 0), imgHeight-1);
printf("yo");
}
// kernelRenderCirclesMAYBE-- (CUDA device code)
//
// Naive alternative: each thread shades one pixel by looping over every circle
// that was flagged as intersecting it.
__global__ void kernelRenderCirclesMAYBE (int** pointInCircle, int numCir,
short** box, bool conditional) {
printf("blah");
dim3 blockDim(256, 1);
dim3 gridDim((numCir + blockDim.x - 1) / blockDim.x);
short pixX = blockDim.x * blockIdx.x + threadIdx.x;
short pixY = blockDim.y * blockIdx.y + threadIdx.y;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
if (pixX >= imageWidth || pixY >= imageHeight)
return;
int pixInd = pixY * imageWidth + pixX;
int* isInCircle = pointInCircle[pixInd];
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
for (int i = 0; i < numCir; i++) {
if (!isInCircle[i])
continue;
float rad = cuConstRendererParams.radius[i];
float3 p = *(float3*) (&cuConstRendererParams.position[i*3]);
float3 color = *(float3*) (&cuConstRendererParams.color[i*3]);
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float diffX = p.x - pixX;
float diffY = p.y - pixY;
float pixelDist = diffX * diffX + diffY * diffY;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = color;
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
//draw into to the pixel shared buffer
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
}
*imagePtr = newColor;
}
// kernelRenderCirclesFALSE -- (CUDA device code)
//
// Really naive implementation that renders a single circle across multiple threads.
// Meant to be called sequentially for each circle.
__global__ void kernelRenderCirclesFALSE (short minX, short minY,
float3 p, float rad, float3 color,
bool conditional, int cirIndex) {
short pixX = blockDim.x * blockIdx.x + threadIdx.x + minX;
short pixY = blockDim.y * blockIdx.y + threadIdx.y + minY;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
float maxDist = rad * rad;
float diffX = pcen.x - p.x;
float diffY = pcen.y - p.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist > maxDist) {
return;
}
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = color;
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
//draw into to the pixel shared buffer
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
*imagePtr = newColor;
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// Read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// Compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// A bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
    // For all pixels in the bounding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]);
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
imgPtr++;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
hipFree(devPointInCircle);
hipFree(devCirBinsCount);
hipFree(devCirBinsIndex);
hipFree(devBinStartIndex);
hipFree(devBinNumCir);
}
}
const Image*
CudaRenderer::getImage() {
// Need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
bool isFastGPU = false;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
if (name.compare("GeForce GTX 1080") == 0)
{
isFastGPU = true;
}
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
if (!isFastGPU)
{
printf("WARNING: "
"You're not running on a fast GPU, please consider using "
"NVIDIA GTX 1080.\n");
printf("---------------------------------------------------------\n");
}
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
/** Setup code for new approach! */
//We want more bins if there are more circles, but we also want them to be aligned
//with 16x16 grids when running renderCircles.
//
if (numCircles < 100) {
binNumLength = 4;
} else if (numCircles >= 100 && numCircles < 1000) {
binNumLength = 8;
} else if (numCircles >= 1000 && numCircles < 10000) {
binNumLength = 16;
} else {
binNumLength = 64;
}
uint notBinPixLength = (image->width - 1)/binNumLength + 1; //standard
binPixLength = max(1, (notBinPixLength / 16)) * 16; //push to floor multiple of 16
binNumLength = (image->width - 1)/binPixLength + 1; //correct binNumLength
//allocate memory for cuda
hipMalloc(&devPointInCircle, sizeof(uint) * image->width * image->height);
hipMalloc(&devCirBinsCount, sizeof(uint) * numCircles);
hipMalloc(&devCirBinsIndex, sizeof(uint) * numCircles);
hipMalloc(&devBinStartIndex, sizeof(uint) * binNumLength * binNumLength);
hipMalloc(&devBinNumCir, sizeof(uint) * binNumLength * binNumLength);
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// Also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// Copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
bool conditional = (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME);
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
/** Step 1: Get number of bins per circle
*/
hipLaunchKernelGGL(( kernelGetCirBinsCount), dim3(gridDim), dim3(blockDim), 0, 0, devCirBinsCount, binPixLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
uint delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
    /** Step 1.5: If we have reason to believe that the scene has really low density,
                  then we switch approach. This is either from having a very small
                  number of circles, or a sparse pattern.
We can tell by checking how many circles are in each bin.
*/
if (numCircles < 5) {
for (int i = 0; i < numCircles; i++) {
float3 p = *(float3*)(&position[i*3]);
float rad = radius[i];
float3 col = *(float3*)(&color[i*3]);
int imgWidth = image->width;
int imgHeight = image->height;
int minX = fminf(fmaxf((p.x-rad) * imgWidth, 0), imgWidth-1);
int maxX = fminf(fmaxf((p.x+rad) * imgWidth, 0), imgWidth-1);
int minY = fminf(fmaxf((p.y-rad) * imgHeight, 0), imgHeight-1);
int maxY = fminf(fmaxf((p.y+rad) * imgHeight, 0), imgHeight-1);
printf("%d, %d | %d, %d\n", minX, maxX, minY, maxY);
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((maxX - minX) / pixelBlockDim.x + 1,
(maxY - minY) / pixelBlockDim.y + 1);
hipLaunchKernelGGL(( kernelRenderCirclesFALSE), dim3(pixelGridDim), dim3(pixelBlockDim), 0, 0, minX, minY,
p, rad,
col, conditional, i);
hipDeviceSynchronize();
}
return;
} /**else {
int imgWidth = image->width;
int imgHeight = image->height;
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((numCircles - 1) / pixelBlockDim.x + 1,
(imgWidth*imgHeight - 1) / pixelBlockDim.y + 1);
short** box;
hipMalloc(&box, 4 * sizeof(short) * numCircles);
kernelRenderCirclesBoxes<<<gridDim, blockDim>>>(numCircles, box);
//once per bin-circle
kernelDoesIntersectCircle<<<pixelGridDim, pixelBlockDim>>>(devPointInCircle, box,
numCircles, imgWidth*imgHeight);
//once per circle
kernelRenderCirclesMAYBE<<<gridDim, blockDim>>> (devPointInCircle,
numCircles,
box, conditional);
hipFree(box);
return;
}*/
/** Step 2: Get starting index of the bins per circle
Done by using thrust::exclusive_scan (sorry!)
*/
thrust::exclusive_scan(thrust::device_ptr<uint>(devCirBinsCount),
thrust::device_ptr<uint>(devCirBinsCount + numCircles),
thrust::device_ptr<uint>(devCirBinsIndex));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
// Get how many bin-circle pairs there are
uint lastCirBinsCount, lastCirBinsIndex;
hipMemcpy(&lastCirBinsCount, devCirBinsCount + numCircles - 1,
sizeof(uint), hipMemcpyDeviceToHost);
hipMemcpy(&lastCirBinsIndex, devCirBinsIndex + numCircles - 1,
sizeof(uint), hipMemcpyDeviceToHost);
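    // With an exclusive scan, total pair count = last circle's start index + its own count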
uint cirBinsLength = lastCirBinsCount + lastCirBinsIndex;
/** Step 3: Bind each bin with its circle and relative index within its circle
* 4: Sort by bin index (using thrust::stable_sort to preserve circle
* order)
*/
uint *devCirBins_Bin, *devCirBins_Cir;
hipMalloc(&devCirBins_Bin, sizeof(uint) * cirBinsLength);
hipMalloc(&devCirBins_Cir, sizeof(uint) * cirBinsLength);
hipLaunchKernelGGL(( kernelGetCirBinsPair), dim3(gridDim), dim3(blockDim), 0, 0, devCirBinsIndex,
devCirBins_Bin, devCirBins_Cir,
binNumLength, binPixLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
thrust::stable_sort_by_key(thrust::device_ptr<uint>(devCirBins_Bin),
thrust::device_ptr<uint>(devCirBins_Bin + cirBinsLength),
thrust::device_ptr<uint>(devCirBins_Cir));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
/** Now that we have all the bins in order with circle order preserved, we
* can do each bin in parallel!
*
* Step 5: Find the starting index of each bin and how many circles are in there
*/
printf("Step 5\n");
// Still use 256 threads per block
uint numBins = binNumLength * binNumLength;
dim3 cirBinsGridDim((cirBinsLength + blockDim.x - 1) / blockDim.x);
hipLaunchKernelGGL(( kernelGetBinStartIndex), dim3(cirBinsGridDim), dim3(blockDim), 0, 0, devBinStartIndex,
devCirBins_Bin,
cirBinsLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
hipMemset(devBinStartIndex, 0, sizeof(uint));
dim3 binsGridDim((numBins + blockDim.x - 1) / blockDim.x);
hipLaunchKernelGGL(( kernelGetBinSizes), dim3(binsGridDim), dim3(blockDim), 0, 0, devBinNumCir,
devBinStartIndex,
numBins,
cirBinsLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
thrust::device_ptr<uint> result = thrust::max_element(thrust::device_ptr<uint>(devBinNumCir),
thrust::device_ptr<uint>(devBinNumCir + numBins));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
uint maxBinNumCir = result[0];
/** Step 6: Finally render the circles, with each block of pixels being drawn
* on a separate thread.
*/
//printf("Step 6, %d\n", maxBinNumCir);
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((image->width - 1) / pixelBlockDim.x + 1,
(image->height - 1) / pixelBlockDim.y + 1);
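    // Stage per-circle data in shared memory only when the busiest bin is small enough:
    // 1000 circles * 7 floats * 4 bytes is about 28 KB, which still fits in the
    // 48 KB of shared memory typically available per block.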
if (maxBinNumCir < 1000) {
uint sharedMemSize = maxBinNumCir * (7*sizeof(float));
//3 for center coordinates
//1 for radius
//printf("%d\n", sharedMemSize); //3 for color
hipLaunchKernelGGL(( kernelRenderCirclesTRUE), dim3(pixelGridDim), dim3(pixelBlockDim),
sharedMemSize, 0, devCirBins_Cir,
devBinStartIndex,
binNumLength,
binPixLength,
devBinNumCir,
maxBinNumCir,
conditional,
true);
} else {
//Too much memory to share across threads!!!
hipLaunchKernelGGL(( kernelRenderCirclesTRUE), dim3(pixelGridDim), dim3(pixelBlockDim), 0, 0, devCirBins_Cir,
devBinStartIndex,
binNumLength,
binPixLength,
devBinNumCir,
maxBinNumCir,
conditional,
false);
}
// Initial solution given (BAD!)
// kernelRenderCircles<<<gridDim, blockDim>>>();
hipDeviceSynchronize();
hipFree(devCirBins_Bin);
hipFree(devCirBins_Cir);
}
| 78bae23958127e18442cdf4cecf69cff2e77bb03.cu | #include <string>
#include <algorithm>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <vector>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <sys/time.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////////////
// All cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// Read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// Color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// Include parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// Write to global memory: As an optimization, this code uses a float4
// store, which results in more efficient code than if it were coded as
// four separate float stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// Write to global memory: As an optimization, this code uses a float4
// store, which results in more efficient code than if it were coded as
// four separate float stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update positions of fireworks
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = M_PI;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// Determine the firework center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// Update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// Firework sparks
float sx = position[index3j];
float sy = position[index3j+1];
// Compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// Compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// Random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// Travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// Shrink the circle back down once its radius exceeds the threshold
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step. Update circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// Load from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// Hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// Add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// Drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// Update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// Update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// If the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// Restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// Store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Given a pixel and a circle, determine the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// Circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// There is a non-zero contribution. Now compute the shading value
// Suggestion: This conditional is in the inner loop. Although it
// will evaluate the same for all threads, there is overhead in
// setting up the lane masks, etc., to implement the conditional. It
// would be wise to perform this logic outside of the loops in
// kernelRenderCircles. (If feeling good about yourself, you
// could use some specialized template magic).
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// Global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
// kernelGetCirBinsCount -- (CUDA device code)
//
// Each thread counts how many bins its circle's bounding box overlaps.
__global__ void kernelGetCirBinsCount(uint* count, uint binPixLength) {
int circleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (circleIdx >= cuConstRendererParams.numCircles)
return;
int circleIdx3 = circleIdx * 3;
float3 cen = *(float3*) (&cuConstRendererParams.position[circleIdx3]);
float rad = cuConstRendererParams.radius[circleIdx];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
short minX = fminf(fmaxf((cen.x - rad) * imgWidth , 0), imgWidth-1);
short maxX = fminf(fmaxf((cen.x + rad) * imgWidth , 0), imgWidth-1);
short minY = fminf(fmaxf((cen.y - rad) * imgHeight, 0), imgHeight-1);
short maxY = fminf(fmaxf((cen.y + rad) * imgHeight, 0), imgHeight-1);
short xbinStart = minX / binPixLength;
short xbinEnd = (maxX / binPixLength) + 1;
short ybinStart = minY / binPixLength;
short ybinEnd = (maxY / binPixLength) + 1;
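// xbinEnd/ybinEnd are exclusive, so the product below is the number of bins
// the circle's bounding box overlaps.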
count[circleIdx] = static_cast<uint>((xbinEnd - xbinStart) * (ybinEnd - ybinStart));
}
// kernelGetCirBinsPair -- (CUDA device code)
//
// Each thread corresponds to a circle; it fills two parallel arrays covering
// all bin-circle pairs: one holding the bin index on the image grid and the
// other holding the circle index.
// Seems like repetitive logic, but we needed to know the length beforehand!
__global__ void kernelGetCirBinsPair(uint* devInputCirIdxStart,
uint* devOutputBinsCir_Bin,
uint* devOutputBinsCir_Cir,
uint binNumLength,
uint binPixLength) {
int circleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (circleIdx >= cuConstRendererParams.numCircles)
return;
int circleIdx3 = circleIdx * 3;
float3 cen = *(float3*) (&cuConstRendererParams.position[circleIdx3]);
float rad = cuConstRendererParams.radius[circleIdx];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
short minX = fminf(fmaxf((cen.x-rad) * imgWidth, 0), imgWidth-1);
short maxX = fminf(fmaxf((cen.x+rad) * imgWidth, 0), imgWidth-1);
short minY = fminf(fmaxf((cen.y-rad) * imgHeight, 0), imgHeight-1);
short maxY = fminf(fmaxf((cen.y+rad) * imgHeight, 0), imgHeight-1);
short xbinStart = minX / binPixLength;
short xbinEnd = (maxX / binPixLength) + 1;
short ybinStart = minY / binPixLength;
short ybinEnd = (maxY / binPixLength) + 1;
uint ind = devInputCirIdxStart[circleIdx];
//Row-major order!
for (uint y = ybinStart; y < ybinEnd; y++) {
uint binOffset = y * binNumLength;
for (uint x = xbinStart; x < xbinEnd; x++) {
devOutputBinsCir_Bin[ind] = binOffset + x;
devOutputBinsCir_Cir[ind] = circleIdx;
ind++;
}
}
}
// kernelGetBinStartIndex -- (CUDA device code)
//
// For each bin, we want to know its starting index in the sorted bin-circle array.
// We can set up shared memory across threads and check if the previous index
// is different from the current index.
__global__ void kernelGetBinStartIndex(uint* devOutputBinStartIndex,
uint* devInputCirBins_Bin,
uint binsCirLength) {
__shared__ int cache[257]; //blockDim.x + 1
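// cache[0] holds the bin id of the element just before this block's first
// element; cache[1 + threadIdx.x] holds each thread's own bin id, so a thread
// starts a new bin exactly when its entry differs from its predecessor's.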
int binsCirIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (binsCirIdx >= binsCirLength)
return;
if (threadIdx.x == 0) {
// Do most common case first for if-else
if (binsCirIdx != 0) {
cache[0] = devInputCirBins_Bin[binsCirIdx-1];
} else {
cache[0] = 0;
}
}
cache[1+threadIdx.x] = devInputCirBins_Bin[binsCirIdx];
// ------------------ //
__syncthreads(); //
// ------------------ //
int index = cache[1+threadIdx.x];
bool newBin = (index != cache[threadIdx.x]);
if (newBin) {
// printf("New bin at: %d, %u\n", index, binsCirIdx);
devOutputBinStartIndex[index] = binsCirIdx;
}
// ------------------ //
__syncthreads(); //
// ------------------ //
if (binsCirIdx == binsCirLength - 1) {
newBin = true;
binsCirIdx = (int) binsCirLength;
}
if (newBin) {
int j = index;
while (j > 0 && devOutputBinStartIndex[j] == 0) {
devOutputBinStartIndex[j] = (uint) binsCirIdx;
j--;
}
}
}
// kernelGetBinSizes -- (CUDA device code)
//
// Find the size of each bin (how many circles are inside), which is done with
// pairwise subtraction on the starting indices.
__global__ void kernelGetBinSizes(uint* devOutputBinSizes,
uint* devInputBinStartIndex,
uint binsTotal,
uint binsCirLength) {
__shared__ int cache[257]; //blockDim.x + 1
int binsCirIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (binsCirIdx >= binsTotal)
return;
if (threadIdx.x == blockDim.x - 1) {
// Do most common case first for if-else
if (binsCirIdx != binsTotal - 1) {
cache[threadIdx.x+1] = devInputBinStartIndex[binsCirIdx+1];
}
}
if (binsCirIdx == binsTotal - 1) {
cache[1+threadIdx.x] = binsCirLength;
}
cache[threadIdx.x] = devInputBinStartIndex[binsCirIdx];
__syncthreads();
devOutputBinSizes[binsCirIdx] = cache[1+threadIdx.x] - cache[threadIdx.x];
}
// kernelRenderCirclesTRUE -- (CUDA device code)
//
// My implementation of rendering circles properly. Each thread operates on a
// single pixel of a 16x16 block: if each thread rendered a whole circle
// instead, there would be no guarantee that circles are composited in order.
//
__global__ void kernelRenderCirclesTRUE(uint* devCirBins_Cir,
uint* devBinStartIndex,
uint binNumLength,
uint binPixLength,
uint* devBinNumCir,
uint maxBinNumCir, bool conditional,
bool share) {
//extern keyword since dynamically allocated!
extern __shared__ float cache[];
float *shareCen = cache;
float *shareRad = cache + maxBinNumCir * 3;
float *shareCol = shareRad + maxBinNumCir;
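// Shared-memory layout (when 'share' is true): maxBinNumCir float3 centers,
// then maxBinNumCir radii, then maxBinNumCir float3 colors -- 7 floats per
// circle, matching the sharedMemSize computed on the host in render().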
//Find bin from pixel coordinate
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short pixX = blockDim.x * blockIdx.x + threadIdx.x;
short pixY = blockDim.y * blockIdx.y + threadIdx.y;
short binX = pixX / binPixLength;
short binY = pixY / binPixLength;
short binInd = binY * binNumLength + binX;
int binStart = devBinStartIndex[binInd];
int binSize = devBinNumCir[binInd];
short tCount = blockDim.x * blockDim.y;
short threadId = threadIdx.y * blockDim.x + threadIdx.x;
//Move radius and center to shared data
if (share)
for (int i = threadId; i < binSize; i += tCount) {
uint cirIndex = devCirBins_Cir[binStart + i];
shareRad[i] = cuConstRendererParams.radius[cirIndex];
*(float3*)(&shareCen[i * 3]) = *(float3*)(&cuConstRendererParams.position[cirIndex * 3]);
*(float3*)(&shareCol[i * 3]) = *(float3*)(&cuConstRendererParams.color[cirIndex * 3]);
}
// Bounds-check only after the shared-memory loads above: every thread in the
// block must help fill the cache, and returning earlier would also mean
// out-of-range threads skip the __syncthreads() below.
if (pixX >= imageWidth || pixY >= imageHeight) {
return;
}
__syncthreads();
//Move lots of logic from shadePixel into here
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
for (int i = 0; i < binSize; i++) {
uint cirIndex = devCirBins_Cir[binStart + i];
float3 cen = (share) ? *(float3*) (&shareCen[i*3]) :
*(float3*) (&cuConstRendererParams.position[cirIndex * 3]);
float rad = (share) ? shareRad[i] : cuConstRendererParams.radius[cirIndex];
float maxDist = rad * rad;
float diffX = pcen.x - cen.x;
float diffY = pcen.y - cen.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist > maxDist) {
continue;
}
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-cen.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = (share) ? *(float3*) (&shareCol[i * 3]) :
*(float3*) (&cuConstRendererParams.color[cirIndex * 3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// Blend this circle's contribution into the accumulated pixel color
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
}
*imagePtr = newColor;
}
// kerneldoesIntersectCircle -- (CUDA device code)
//
__global__ void kernelDoesIntersectCircle (int **dev_result, short **box,
int numCir, int numPix) {
printf(":D");
int indexX = blockDim.x * blockIdx.x + threadIdx.x; //Circle idx
int indexY = blockDim.y * blockIdx.y + threadIdx.y; //Pixel idx
if (indexX >= numCir || indexY >= numPix)
return;
short imageWidth = cuConstRendererParams.imageWidth;
int pixX = indexY % imageWidth;
int pixY = indexY / imageWidth;
short *bbox = box[indexX];
if (pixX >= bbox[0] && pixX <= bbox[1] && pixY >= bbox[2] && pixY <= bbox[3]){
float rad = cuConstRendererParams.radius[indexX];
float3 p = *(float3*) (&cuConstRendererParams.position[3 * indexX]);
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
float maxDist = rad * rad;
float diffX = pcen.x - p.x;
float diffY = pcen.y - p.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist <= maxDist) {
dev_result[indexY][indexX] = 1;
return;
}
}
dev_result[indexY][indexX] = 0;
}
// kernelRenderCirclesBoxes-- (CUDA device code)
//
// Gets the bounding boxes for all circles
__global__ void kernelRenderCirclesBoxes (int numCir, short** box) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
float3 cen = *(float3*) (&cuConstRendererParams.position[3*index]);
float rad = cuConstRendererParams.radius[index];
//Get bounding box by pixel index
short imgWidth = cuConstRendererParams.imageWidth;
short imgHeight = cuConstRendererParams.imageHeight;
box[index][0] = fminf(fmaxf((cen.x-rad) * imgWidth, 0), imgWidth-1);
box[index][1] = fminf(fmaxf((cen.x+rad) * imgWidth, 0), imgWidth-1);
box[index][2] = fminf(fmaxf((cen.y-rad) * imgHeight, 0), imgHeight-1);
box[index][3] = fminf(fmaxf((cen.y+rad) * imgHeight, 0), imgHeight-1);
printf("yo");
}
// kernelRenderCirclesMAYBE-- (CUDA device code)
//
// Really naive implementation in which each thread handles one pixel and
// loops over every circle flagged as covering it by kernelDoesIntersectCircle.
__global__ void kernelRenderCirclesMAYBE (int** pointInCircle, int numCir,
short** box, bool conditional) {
printf("blah");
short pixX = blockDim.x * blockIdx.x + threadIdx.x;
short pixY = blockDim.y * blockIdx.y + threadIdx.y;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
if (pixX >= imageWidth || pixY >= imageHeight)
return;
int pixInd = pixY * imageWidth + pixX;
int* isInCircle = pointInCircle[pixInd];
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
for (int i = 0; i < numCir; i++) {
if (!isInCircle[i])
continue;
float rad = cuConstRendererParams.radius[i];
float3 p = *(float3*) (&cuConstRendererParams.position[i*3]);
float3 color = *(float3*) (&cuConstRendererParams.color[i*3]);
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float diffX = p.x - pixX;
float diffY = p.y - pixY;
float pixelDist = diffX * diffX + diffY * diffY;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = color;
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// Blend this circle's contribution into the accumulated pixel color
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
}
*imagePtr = newColor;
}
// kernelRenderCirclesFALSE -- (CUDA device code)
//
// Really naive implementation that does a circle across multiple threads
// Meant to be sequentially called across all circles.
__global__ void kernelRenderCirclesFALSE (short minX, short minY,
float3 p, float rad, float3 color,
bool conditional, int cirIndex) {
short pixX = blockDim.x * blockIdx.x + threadIdx.x + minX;
short pixY = blockDim.y * blockIdx.y + threadIdx.y + minY;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
float4 *imagePtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixY * imageWidth + pixX)]);
float4 newColor = *imagePtr;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pcen = make_float2(invWidth * (static_cast<float>(pixX) + 0.5f),
invHeight * (static_cast<float>(pixY) + 0.5f));
float maxDist = rad * rad;
float diffX = pcen.x - p.x;
float diffY = pcen.y - p.y;
float pixelDist = diffX * diffX + diffY * diffY;
// Circle does not contribute to the image
if (pixelDist > maxDist) {
return;
}
float3 rgb;
float alpha;
if (conditional) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// Simple: each circle has an assigned color
rgb = color;
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// Blend this circle's contribution into the accumulated pixel color
newColor.x = alpha*rgb.x + oneMinusAlpha * newColor.x;
newColor.y = alpha*rgb.y + oneMinusAlpha * newColor.y;
newColor.z = alpha*rgb.z + oneMinusAlpha * newColor.z;
newColor.w = alpha + newColor.w;
*imagePtr = newColor;
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// Read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// Compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// A bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// For all pixels in the bounding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]);
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
imgPtr++;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
cudaFree(devPointInCircle);
cudaFree(devCirBinsCount);
cudaFree(devCirBinsIndex);
cudaFree(devBinStartIndex);
cudaFree(devBinNumCir);
}
}
const Image*
CudaRenderer::getImage() {
// Need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
bool isFastGPU = false;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
if (name.compare("GeForce GTX 1080") == 0)
{
isFastGPU = true;
}
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
if (!isFastGPU)
{
printf("WARNING: "
"You're not running on a fast GPU, please consider using "
"NVIDIA GTX 1080.\n");
printf("---------------------------------------------------------\n");
}
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
/** Setup code for new approach! */
//We want more bins if there are more circles, but we also want them to be aligned
//with 16x16 grids when running renderCircles.
//
if (numCircles < 100) {
binNumLength = 4;
} else if (numCircles >= 100 && numCircles < 1000) {
binNumLength = 8;
} else if (numCircles >= 1000 && numCircles < 10000) {
binNumLength = 16;
} else {
binNumLength = 64;
}
uint notBinPixLength = (image->width - 1)/binNumLength + 1; //ceil(width / binNumLength)
binPixLength = max(1, (notBinPixLength / 16)) * 16; //round down to a multiple of 16 (at least 16)
binNumLength = (image->width - 1)/binPixLength + 1; //recompute the bin count for the adjusted bin size
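// Example: for a 1024-pixel-wide image with 1000-10000 circles, binNumLength
// starts at 16, notBinPixLength is 64, binPixLength stays 64, and the
// corrected binNumLength is again 16 -- a 16x16 grid of 64x64-pixel bins.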
//allocate memory for cuda
cudaMalloc(&devPointInCircle, sizeof(uint) * image->width * image->height);
cudaMalloc(&devCirBinsCount, sizeof(uint) * numCircles);
cudaMalloc(&devCirBinsIndex, sizeof(uint) * numCircles);
cudaMalloc(&devBinStartIndex, sizeof(uint) * binNumLength * binNumLength);
cudaMalloc(&devBinNumCir, sizeof(uint) * binNumLength * binNumLength);
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// Also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// Copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
bool conditional = (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME);
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
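/** Overall approach: count how many bins each circle overlaps, exclusive-scan
the counts into per-circle offsets, emit one (bin, circle) pair per overlap,
stable-sort the pairs by bin (preserving circle order within a bin), compute
each bin's start index and size, then render each 16x16 pixel block against
only the circles listed for its bin.
*/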
/** Step 1: Get number of bins per circle
*/
kernelGetCirBinsCount<<<gridDim, blockDim>>>(devCirBinsCount, binPixLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
uint delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
/** Step 1.5: If we have reason to believe that the scene has really low density,
then we switch approaches. This happens either when there is a very small
number of circles or a sparse pattern.
We can tell by checking how many circles fall in each bin.
*/
if (numCircles < 5) {
for (int i = 0; i < numCircles; i++) {
float3 p = *(float3*)(&position[i*3]);
float rad = radius[i];
float3 col = *(float3*)(&color[i*3]);
int imgWidth = image->width;
int imgHeight = image->height;
int minX = fminf(fmaxf((p.x-rad) * imgWidth, 0), imgWidth-1);
int maxX = fminf(fmaxf((p.x+rad) * imgWidth, 0), imgWidth-1);
int minY = fminf(fmaxf((p.y-rad) * imgHeight, 0), imgHeight-1);
int maxY = fminf(fmaxf((p.y+rad) * imgHeight, 0), imgHeight-1);
printf("%d, %d | %d, %d\n", minX, maxX, minY, maxY);
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((maxX - minX) / pixelBlockDim.x + 1,
(maxY - minY) / pixelBlockDim.y + 1);
kernelRenderCirclesFALSE<<<pixelGridDim, pixelBlockDim>>>(minX, minY,
p, rad,
col, conditional, i);
cudaDeviceSynchronize();
}
return;
} /**else {
int imgWidth = image->width;
int imgHeight = image->height;
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((numCircles - 1) / pixelBlockDim.x + 1,
(imgWidth*imgHeight - 1) / pixelBlockDim.y + 1);
short** box;
cudaMalloc(&box, 4 * sizeof(short) * numCircles);
kernelRenderCirclesBoxes<<<gridDim, blockDim>>>(numCircles, box);
//once per bin-circle
kernelDoesIntersectCircle<<<pixelGridDim, pixelBlockDim>>>(devPointInCircle, box,
numCircles, imgWidth*imgHeight);
//once per circle
kernelRenderCirclesMAYBE<<<gridDim, blockDim>>> (devPointInCircle,
numCircles,
box, conditional);
cudaFree(box);
return;
}*/
/** Step 2: Get starting index of the bins per circle
Done by using thrust::exclusive_scan (sorry!)
*/
thrust::exclusive_scan(thrust::device_ptr<uint>(devCirBinsCount),
thrust::device_ptr<uint>(devCirBinsCount + numCircles),
thrust::device_ptr<uint>(devCirBinsIndex));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
// Get how many bin-circle pairs there are
uint lastCirBinsCount, lastCirBinsIndex;
cudaMemcpy(&lastCirBinsCount, devCirBinsCount + numCircles - 1,
sizeof(uint), cudaMemcpyDeviceToHost);
cudaMemcpy(&lastCirBinsIndex, devCirBinsIndex + numCircles - 1,
sizeof(uint), cudaMemcpyDeviceToHost);
uint cirBinsLength = lastCirBinsCount + lastCirBinsIndex;
/** Step 3: Bind each bin with its circle and relative index within its circle
* 4: Sort by bin index (using thrust::stable_sort to preserve circle
* order)
*/
uint *devCirBins_Bin, *devCirBins_Cir;
cudaMalloc(&devCirBins_Bin, sizeof(uint) * cirBinsLength);
cudaMalloc(&devCirBins_Cir, sizeof(uint) * cirBinsLength);
kernelGetCirBinsPair<<<gridDim, blockDim>>>(devCirBinsIndex,
devCirBins_Bin, devCirBins_Cir,
binNumLength, binPixLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
thrust::stable_sort_by_key(thrust::device_ptr<uint>(devCirBins_Bin),
thrust::device_ptr<uint>(devCirBins_Bin + cirBinsLength),
thrust::device_ptr<uint>(devCirBins_Cir));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
/** Now that we have all the bins in order with circle order preserved, we
* can do each bin in parallel!
*
* Step 5: Find the starting index of each bin and how many circles are in there
*/
printf("Step 5\n");
// Still use 256 threads per block
uint numBins = binNumLength * binNumLength;
dim3 cirBinsGridDim((cirBinsLength + blockDim.x - 1) / blockDim.x);
kernelGetBinStartIndex<<<cirBinsGridDim, blockDim>>>(devBinStartIndex,
devCirBins_Bin,
cirBinsLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
cudaMemset(devBinStartIndex, 0, sizeof(uint));
dim3 binsGridDim((numBins + blockDim.x - 1) / blockDim.x);
kernelGetBinSizes<<<binsGridDim, blockDim>>>(devBinNumCir,
devBinStartIndex,
numBins,
cirBinsLength);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
thrust::device_ptr<uint> result = thrust::max_element(thrust::device_ptr<uint>(devBinNumCir),
thrust::device_ptr<uint>(devBinNumCir + numBins));
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Time: %u\n", delta_us);
uint maxBinNumCir = result[0];
/** Step 6: Finally render the circles, with each block of pixels being drawn
* on a separate thread.
*/
//printf("Step 6, %d\n", maxBinNumCir);
dim3 pixelBlockDim(16,16);
dim3 pixelGridDim((image->width - 1) / pixelBlockDim.x + 1,
(image->height - 1) / pixelBlockDim.y + 1);
if (maxBinNumCir < 1000) {
uint sharedMemSize = maxBinNumCir * (7*sizeof(float));
//3 for center coordinates
//1 for radius
//printf("%d\n", sharedMemSize); //3 for color
kernelRenderCirclesTRUE<<<pixelGridDim, pixelBlockDim,
sharedMemSize>>>(devCirBins_Cir,
devBinStartIndex,
binNumLength,
binPixLength,
devBinNumCir,
maxBinNumCir,
conditional,
true);
} else {
//Too much memory to share across threads!!!
kernelRenderCirclesTRUE<<<pixelGridDim, pixelBlockDim>>>(devCirBins_Cir,
devBinStartIndex,
binNumLength,
binPixLength,
devBinNumCir,
maxBinNumCir,
conditional,
false);
}
// Initial solution given (BAD!)
// kernelRenderCircles<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
cudaFree(devCirBins_Bin);
cudaFree(devCirBins_Cir);
}
|
33c68c9a6783f0f060a54396b36bd32723cc959f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/ExpandUtils.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseCsrTensorImpl.h>
#include <ATen/SparseCsrTensorUtils.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Resize.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <type_traits>
#include <THH/THHThrustAllocator.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
namespace at {
namespace native {
using namespace at::sparse_csr;
// certain utility functions are usable from sparse COO.
using namespace at::sparse;
Tensor& addmm_out_sparse_csr_dense_cuda(
const Tensor& self,
const SparseCsrTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha,
Tensor& r)
{
TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr());
Tensor t = *expand_size(self, {sparse.size(0), dense.size(1)}, "addmm_out_sparse_csr");
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm expected 't' to be CUDA tensor");
TORCH_CHECK(
r.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(
sparse.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'mat1' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(
dense.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'mat2' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(
sparse.dim() == 2,
"addmm: 2-D matrices expected, got ",
sparse.dim(),
"D tensor");
TORCH_CHECK(
dense.dim() == 2,
"addmm: 2-D matrices expected, got ",
dense.dim(),
"D tensor");
TORCH_CHECK(
r.is_contiguous(),
"out argument must be contiguous, but got: ",
r.suggest_memory_format());
// mxk * kxn = mxn
int64_t m = sparse.size(0);
int64_t k = sparse.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(
dense.size(0) == k,
"addmm: Expected dense matrix (dense) size(0)=",
k,
", got ",
dense.size(0));
resize_output(r, {m, n});
int64_t nnz = sparse._nnz();
if (nnz == 0) {
at::mul_out(r, t, at::scalar_tensor(beta, r.options()));
return r;
}
// TODO: Check if hipsparseSpMM can use 64-bit indices
// https://docs.nvidia.com/cuda/cusparse/index.html
auto col_indices = sparse.col_indices().to(at::kInt);
auto crow_indices = sparse.crow_indices().to(at::kInt);
auto values = sparse.values();
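// The worker performs the addmm computation r = beta * t + alpha * (sparse @ dense)
// using the int32 CSR indices prepared above.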
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r, beta, t, alpha, crow_indices, col_indices, values, dense);
return r;
}
Tensor& add_out_dense_sparse_csr_cuda(
Tensor& output,
const Tensor& dense,
const SparseCsrTensor& src,
const Scalar& alpha) {
TORCH_INTERNAL_ASSERT(dense.layout() == kStrided);
TORCH_INTERNAL_ASSERT(src.is_sparse_csr());
TORCH_INTERNAL_ASSERT(dense.is_cuda());
TORCH_CHECK(
output.is_contiguous(),
"out argument must be contiguous, but got: ",
output.suggest_memory_format());
TORCH_CHECK(
output.is_cuda(),
"add: expected 'out' to be CUDA tensor, but got tensor on device: ",
output.device());
TORCH_CHECK(
src.is_cuda(),
"add: expected 'other' to be a CUDA tensor, but got tensor on device: ",
src.device());
TORCH_CHECK(
dense.sizes().equals(src.sizes()),
"add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(),
" while other has size ",
src.sizes(),
" (FYI: dense-sparse addition does not currently support broadcasting)");
auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type());
TORCH_CHECK(
canCast(commonDtype, output.scalar_type()),
"Can't convert result type ",
commonDtype,
" to output ",
output.scalar_type(),
" in add operation");
Tensor src_values = src.values();
Tensor src_crow_indices = src.crow_indices();
Tensor src_col_indices = src.col_indices();
resize_output(output, dense.sizes());
Tensor resultBuffer = output;
Tensor valuesBuffer = src_values.to(commonDtype);
if (output.scalar_type() != commonDtype) {
resultBuffer = dense.to(commonDtype);
} else if (!is_same_tensor(output, dense)) {
resultBuffer.copy_(dense);
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16,
commonDtype,
"add_out_op2_sparse_csr",
[&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() {
AT_DISPATCH_INDEX_TYPES(
src_crow_indices.scalar_type(),
"csr_add_out_crow_indices",
[&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() {
scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>();
scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>();
scalar_t cast_value = alpha.to<scalar_t>();
index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>();
index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>();
int64_t out_storage_offset = resultBuffer.storage_offset();
auto out_strides = resultBuffer.strides();
int64_t out_strides0 = out_strides[0];
int64_t out_strides1 = out_strides[1];
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows.
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)),
[values_accessor,
crow_indices_accessor,
col_indices_accessor,
out_ptr,
out_storage_offset,
out_strides0,
cast_value,
out_strides1
]__device__(int64_t irow) {
index_t start_index = crow_indices_accessor[irow];
index_t end_index = crow_indices_accessor[irow + 1];
for (index_t i = start_index; i < end_index; ++i) {
auto icol = col_indices_accessor[i];
auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1;
out_ptr[index] += cast_value * values_accessor[i];
}
});
});
});
if (output.scalar_type() != commonDtype) {
output.copy_(resultBuffer);
}
return output;
}
Tensor& add_out_sparse_csr_cuda(
const Tensor& self,
const SparseCsrTensor& other,
const Scalar& alpha,
SparseCsrTensor& out) {
if (self.layout() == kStrided) {
return add_out_dense_sparse_csr_cuda(out, self, other, alpha);
} else {
TORCH_CHECK(
false,
"NotImplementedError: Addition of sparse CSR tensors is not yet implemented.")
}
return out;
}
} // namespace native
} // namespace at
| 33c68c9a6783f0f060a54396b36bd32723cc959f.cu | #include <ATen/ATen.h>
#include <ATen/ExpandUtils.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseCsrTensorImpl.h>
#include <ATen/SparseCsrTensorUtils.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Resize.h>
#include <algorithm>
#include <cuda_runtime.h>
#include <type_traits>
#include <THC/THCThrustAllocator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
namespace at {
namespace native {
using namespace at::sparse_csr;
// certain utility functions are usable from sparse COO.
using namespace at::sparse;
Tensor& addmm_out_sparse_csr_dense_cuda(
const Tensor& self,
const SparseCsrTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha,
Tensor& r)
{
TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr());
Tensor t = *expand_size(self, {sparse.size(0), dense.size(1)}, "addmm_out_sparse_csr");
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm expected 't' to be CUDA tensor");
TORCH_CHECK(
r.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(
sparse.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'mat1' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(
dense.is_cuda(),
"Expected all tensors to be on the same device. addmm: expected 'mat2' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(
sparse.dim() == 2,
"addmm: 2-D matrices expected, got ",
sparse.dim(),
"D tensor");
TORCH_CHECK(
dense.dim() == 2,
"addmm: 2-D matrices expected, got ",
dense.dim(),
"D tensor");
TORCH_CHECK(
r.is_contiguous(),
"out argument must be contiguous, but got: ",
r.suggest_memory_format());
// mxk * kxn = mxn
int64_t m = sparse.size(0);
int64_t k = sparse.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(
dense.size(0) == k,
"addmm: Expected dense matrix (dense) size(0)=",
k,
", got ",
dense.size(0));
resize_output(r, {m, n});
int64_t nnz = sparse._nnz();
if (nnz == 0) {
at::mul_out(r, t, at::scalar_tensor(beta, r.options()));
return r;
}
// TODO: Check if cusparseSpMM can use 64-bit indices
// https://docs.nvidia.com/cuda/cusparse/index.html
auto col_indices = sparse.col_indices().to(at::kInt);
auto crow_indices = sparse.crow_indices().to(at::kInt);
auto values = sparse.values();
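// The worker performs the addmm computation r = beta * t + alpha * (sparse @ dense)
// using the int32 CSR indices prepared above.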
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r, beta, t, alpha, crow_indices, col_indices, values, dense);
return r;
}
Tensor& add_out_dense_sparse_csr_cuda(
Tensor& output,
const Tensor& dense,
const SparseCsrTensor& src,
const Scalar& alpha) {
TORCH_INTERNAL_ASSERT(dense.layout() == kStrided);
TORCH_INTERNAL_ASSERT(src.is_sparse_csr());
TORCH_INTERNAL_ASSERT(dense.is_cuda());
TORCH_CHECK(
output.is_contiguous(),
"out argument must be contiguous, but got: ",
output.suggest_memory_format());
TORCH_CHECK(
output.is_cuda(),
"add: expected 'out' to be CUDA tensor, but got tensor on device: ",
output.device());
TORCH_CHECK(
src.is_cuda(),
"add: expected 'other' to be a CUDA tensor, but got tensor on device: ",
src.device());
TORCH_CHECK(
dense.sizes().equals(src.sizes()),
"add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(),
" while other has size ",
src.sizes(),
" (FYI: dense-sparse addition does not currently support broadcasting)");
auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type());
TORCH_CHECK(
canCast(commonDtype, output.scalar_type()),
"Can't convert result type ",
commonDtype,
" to output ",
output.scalar_type(),
" in add operation");
Tensor src_values = src.values();
Tensor src_crow_indices = src.crow_indices();
Tensor src_col_indices = src.col_indices();
resize_output(output, dense.sizes());
Tensor resultBuffer = output;
Tensor valuesBuffer = src_values.to(commonDtype);
if (output.scalar_type() != commonDtype) {
resultBuffer = dense.to(commonDtype);
} else if (!is_same_tensor(output, dense)) {
resultBuffer.copy_(dense);
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBool, kBFloat16,
commonDtype,
"add_out_op2_sparse_csr",
[&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() {
AT_DISPATCH_INDEX_TYPES(
src_crow_indices.scalar_type(),
"csr_add_out_crow_indices",
[&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() {
scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>();
scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>();
scalar_t cast_value = alpha.to<scalar_t>();
index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>();
index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>();
int64_t out_storage_offset = resultBuffer.storage_offset();
auto out_strides = resultBuffer.strides();
int64_t out_strides0 = out_strides[0];
int64_t out_strides1 = out_strides[1];
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows.
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)),
[values_accessor,
crow_indices_accessor,
col_indices_accessor,
out_ptr,
out_storage_offset,
out_strides0,
cast_value,
out_strides1
]__device__(int64_t irow) {
index_t start_index = crow_indices_accessor[irow];
index_t end_index = crow_indices_accessor[irow + 1];
for (index_t i = start_index; i < end_index; ++i) {
auto icol = col_indices_accessor[i];
auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1;
out_ptr[index] += cast_value * values_accessor[i];
}
});
});
});
if (output.scalar_type() != commonDtype) {
output.copy_(resultBuffer);
}
return output;
}
Tensor& add_out_sparse_csr_cuda(
const Tensor& self,
const SparseCsrTensor& other,
const Scalar& alpha,
SparseCsrTensor& out) {
if (self.layout() == kStrided) {
return add_out_dense_sparse_csr_cuda(out, self, other, alpha);
} else {
TORCH_CHECK(
false,
"NotImplementedError: Addition of sparse CSR tensors is not yet implemented.")
}
return out;
}
} // namespace native
} // namespace at
|
cbc0f0033c870903587fcd027d75550190e2eb7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
const int TRY_COUNT = 512;
void fetch_data(char *file_name, float **mat1, float **mat2, int *mat_size) {
FILE *file = fopen(file_name, "r");
fscanf(file, "SizeA= %d", mat_size);
float *matA = (float*) malloc(sizeof(float) * (*mat_size));
for (int i = 0; i < (*mat_size); i++) {
fscanf(file, "%f", &(matA[i]));
}
fscanf(file, " ");
fscanf(file, "SizeB= %d", mat_size);
float *matB = (float*) malloc(sizeof(float) * (*mat_size));
for (int i = 0; i < (*mat_size); i++) {
fscanf(file, "%f", &(matB[i]));
}
*mat1 = matA;
*mat2 = matB;
fclose(file);
}
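// dot_product: one block computes the dot product of two vectors. Each thread
// writes one elementwise product into tmp[]; after the barrier, thread 0 sums
// tmp[] serially into *result.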
__global__
void dot_product(float *mat1, float *mat2, float *tmp, int *mat_size, float *result) {
tmp[threadIdx.x] = mat1[threadIdx.x] * mat2[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) {
float sum = 0;
for(int i = 0; i < (*mat_size); i++) {
sum += tmp[i];
}
*result = sum;
}
}
int main(int argc, char **argv) {
if (argc != 2) {
printf("Please provide file name!");
exit(1);
}
float *mat1, *mat2;
int mat_size;
fetch_data(argv[1], &mat1, &mat2, &mat_size);
float *cuda_mat1, *cuda_mat2, *cuda_tmp, *cuda_result;
int *cuda_mat_size;
float result_initial_value = 0;
double time_total = 0;
float result;
if (mat_size <= 1000) {
hipMalloc(&cuda_mat1, sizeof(float) * mat_size);
hipMalloc(&cuda_mat2, sizeof(float) * mat_size);
hipMalloc(&cuda_tmp, sizeof(float) * mat_size);
hipMalloc(&cuda_mat_size, sizeof(int));
hipMalloc(&cuda_result, sizeof(float));
hipMemcpy(cuda_mat1, mat1, sizeof(float) * mat_size, hipMemcpyHostToDevice);
hipMemcpy(cuda_mat2, mat2, sizeof(float) * mat_size, hipMemcpyHostToDevice);
hipMemcpy(cuda_mat_size, &mat_size, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_result, &result_initial_value, sizeof(float), hipMemcpyHostToDevice);
for (int i = 0; i < TRY_COUNT; i++) {
double start = omp_get_wtime();
hipLaunchKernelGGL(( dot_product), dim3(1), dim3(mat_size) , 0, 0, cuda_mat1, cuda_mat2, cuda_tmp, cuda_mat_size, cuda_result);
hipDeviceSynchronize();
double end = omp_get_wtime();
time_total += end - start;
}
hipMemcpy(&result, cuda_result, sizeof(float), hipMemcpyDeviceToHost);
} else {
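// For larger inputs, process the vectors in fixed chunks of 1000 elements per
// kernel launch; this path implicitly assumes mat_size is a multiple of 1000.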
int mat_size_dummy = 1000;
hipMalloc(&cuda_mat1, sizeof(float) * mat_size_dummy);
hipMalloc(&cuda_mat2, sizeof(float) * mat_size_dummy);
hipMalloc(&cuda_tmp, sizeof(float) * mat_size_dummy);
hipMalloc(&cuda_mat_size, sizeof(int));
hipMalloc(&cuda_result, sizeof(float));
hipMemcpy(cuda_mat_size, &mat_size_dummy, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_result, &result_initial_value, sizeof(float), hipMemcpyHostToDevice);
for (int i = 0; i < TRY_COUNT; i++) {
double start = omp_get_wtime();
result = 0;
for (int cur = 0; cur < mat_size; cur += 1000) {
hipMemcpy(cuda_mat1, &(mat1[cur]), sizeof(float) * mat_size_dummy, hipMemcpyHostToDevice);
hipMemcpy(cuda_mat2, &(mat2[cur]), sizeof(float) * mat_size_dummy, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dot_product), dim3(1), dim3(1000) , 0, 0, cuda_mat1, cuda_mat2, cuda_tmp, cuda_mat_size, cuda_result);
hipDeviceSynchronize();
float result_tmp;
hipMemcpy(&result_tmp, cuda_result, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cuda_result, &result_initial_value, sizeof(float), hipMemcpyHostToDevice);
result += result_tmp;
}
double end = omp_get_wtime();
time_total += end - start;
}
}
printf("Result: %f\n", result);
printf("Time: %fus\n", 1000000 * (time_total / ((double) TRY_COUNT)));
hipFree(cuda_mat1);
hipFree(cuda_mat2);
hipFree(cuda_tmp);
hipFree(cuda_mat_size);
hipFree(cuda_result);
return 0;
}
| cbc0f0033c870903587fcd027d75550190e2eb7b.cu | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
const int TRY_COUNT = 512;
void fetch_data(char *file_name, float **mat1, float **mat2, int *mat_size) {
FILE *file = fopen(file_name, "r");
fscanf(file, "SizeA= %d", mat_size);
float *matA = (float*) malloc(sizeof(float) * (*mat_size));
for (int i = 0; i < (*mat_size); i++) {
fscanf(file, "%f", &(matA[i]));
}
fscanf(file, " ");
fscanf(file, "SizeB= %d", mat_size);
float *matB = (float*) malloc(sizeof(float) * (*mat_size));
for (int i = 0; i < (*mat_size); i++) {
fscanf(file, "%f", &(matB[i]));
}
*mat1 = matA;
*mat2 = matB;
fclose(file);
}
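// dot_product: one block computes the dot product of two vectors. Each thread
// writes one elementwise product into tmp[]; after the barrier, thread 0 sums
// tmp[] serially into *result.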
__global__
void dot_product(float *mat1, float *mat2, float *tmp, int *mat_size, float *result) {
tmp[threadIdx.x] = mat1[threadIdx.x] * mat2[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) {
float sum = 0;
for(int i = 0; i < (*mat_size); i++) {
sum += tmp[i];
}
*result = sum;
}
}
int main(int argc, char **argv) {
if (argc != 2) {
printf("Please provide file name!");
exit(1);
}
float *mat1, *mat2;
int mat_size;
fetch_data(argv[1], &mat1, &mat2, &mat_size);
float *cuda_mat1, *cuda_mat2, *cuda_tmp, *cuda_result;
int *cuda_mat_size;
float result_initial_value = 0;
double time_total = 0;
float result;
if (mat_size <= 1000) {
cudaMalloc(&cuda_mat1, sizeof(float) * mat_size);
cudaMalloc(&cuda_mat2, sizeof(float) * mat_size);
cudaMalloc(&cuda_tmp, sizeof(float) * mat_size);
cudaMalloc(&cuda_mat_size, sizeof(int));
cudaMalloc(&cuda_result, sizeof(float));
cudaMemcpy(cuda_mat1, mat1, sizeof(float) * mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_mat2, mat2, sizeof(float) * mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_mat_size, &mat_size, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_result, &result_initial_value, sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < TRY_COUNT; i++) {
double start = omp_get_wtime();
dot_product<<< 1, mat_size >>>(cuda_mat1, cuda_mat2, cuda_tmp, cuda_mat_size, cuda_result);
cudaDeviceSynchronize();
double end = omp_get_wtime();
time_total += end - start;
}
cudaMemcpy(&result, cuda_result, sizeof(float), cudaMemcpyDeviceToHost);
} else {
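// For larger inputs, process the vectors in fixed chunks of 1000 elements per
// kernel launch; this path implicitly assumes mat_size is a multiple of 1000.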
int mat_size_dummy = 1000;
cudaMalloc(&cuda_mat1, sizeof(float) * mat_size_dummy);
cudaMalloc(&cuda_mat2, sizeof(float) * mat_size_dummy);
cudaMalloc(&cuda_tmp, sizeof(float) * mat_size_dummy);
cudaMalloc(&cuda_mat_size, sizeof(int));
cudaMalloc(&cuda_result, sizeof(float));
cudaMemcpy(cuda_mat_size, &mat_size_dummy, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_result, &result_initial_value, sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < TRY_COUNT; i++) {
double start = omp_get_wtime();
result = 0;
for (int cur = 0; cur < mat_size; cur += 1000) {
cudaMemcpy(cuda_mat1, &(mat1[cur]), sizeof(float) * mat_size_dummy, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_mat2, &(mat2[cur]), sizeof(float) * mat_size_dummy, cudaMemcpyHostToDevice);
dot_product<<< 1, 1000 >>>(cuda_mat1, cuda_mat2, cuda_tmp, cuda_mat_size, cuda_result);
cudaDeviceSynchronize();
float result_tmp;
cudaMemcpy(&result_tmp, cuda_result, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cuda_result, &result_initial_value, sizeof(float), cudaMemcpyHostToDevice);
result += result_tmp;
}
double end = omp_get_wtime();
time_total += end - start;
}
}
printf("Result: %f\n", result);
printf("Time: %fus\n", 1000000 * (time_total / ((double) TRY_COUNT)));
cudaFree(cuda_mat1);
cudaFree(cuda_mat2);
cudaFree(cuda_tmp);
cudaFree(cuda_mat_size);
cudaFree(cuda_result);
return 0;
}
|
59eff126ab076923a576ca2847de596bd31c9204.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "raytrace_cuda.cuh"
/* Utility function to normalize a vector. */
__device__
void normalize(float *v) {
float length = sqrtf(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
for (int i = 0; i < 3; i++)
v[i] /= length;
}
/* Utility function to compute the dot product of two vectors. */
__device__
float dotf(float *a, float *b) {
float d = 0.0;
for (int i = 0; i < 3; i++)
d += a[i] * b[i];
return d;
}
/**
* This function calculates the attenuation to apply in the Phong lighting model
* given the vector difference between the light and the point to be rendered,
* and a factor k.
*/
__device__
float get_attenuation(float *d, float k) {
// If k is zero (the default), then we don't have to do any computation
return (k == 0.0) ? 1.0 : 1.0 / (1.0 + k * dotf(d, d));
}
/**
* This function calculates the color of a point due to a single light using the
* Phong lighting model, given the surface's normal at that point.
*/
__device__
void phong_lighting(float *point, float *normal, float *eye, Light *light,
float *pixel) {
float diffuse_sum[3] = {0.0, 0.0, 0.0};
float specular_sum[3] = {0.0, 0.0, 0.0};
// Get the vector from the camera to the point
float eye_rel[3];
for (int i = 0; i < 3; i++)
eye_rel[i] = eye[i] - point[i];
normalize(eye_rel);
// Get the vector from the light to the point
float light_rel[3];
for (int i = 0; i < 3; i++)
light_rel[i] = light->position[i] - point[i];
// Calculate and apply the attenuation
float light_color[3];
for (int i = 0; i < 3; i++) {
light_color[i] =
light->color[i] * get_attenuation(light_rel, light->attenuation_k);
}
normalize(light_rel);
// Calculate and add the light's contribution to the diffuse and
// specular reflection of the point
float p = fmax(0.0, dotf(normal, light_rel));
for (int i = 0; i < 3; i++)
diffuse_sum[i] += light_color[i] * p;
for (int i = 0; i < 3; i++)
eye_rel[i] += light_rel[i];
normalize(eye_rel);
p = fmax(0.0, dotf(normal, eye_rel));
for (int i = 0; i < 3; i++)
specular_sum[i] += light_color[i] * p;
// Calculate and add the overall point color intensities for red, green,
// and blue
for (int i = 0; i < 3; i++) {
pixel[i] += fmin(1, diffuse_sum[i] + specular_sum[i]);
}
}
/**
* This function calculates the intersection of a ray 'bv' shot from source 'av'
* with a given sphere.
*/
__device__
Intersect get_sphere_intersection(float *av, float *bv, Sphere *sphere,
int refracting) {
// Given a sphere with center C and radius R, and a ray equation B + tA,
// the intersection of the two can be found by solving |B + tA - C|^2 = R^2,
// which enforces that the ray be on the sphere's surface. This is a
// quadratic equation in t.
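    // Expanding with rel = B - C gives the coefficients used below:
    //   (A.A) t^2 + 2 (A.rel) t + (rel.rel - R^2) = 0,
    // so a = A.A, b = 2*A.rel, c = rel.rel - R^2, and the discriminant
    // b^2 - 4ac decides whether the ray hits the sphere at all.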
Intersect intersect;
float rel[3];
for (int i = 0; i < 3; i++)
rel[i] = bv[i] - sphere->loc[i];
float a = dotf(av, av);
float b = 2 * dotf(av, rel);
float c = dotf(rel, rel) - sphere->radius * sphere->radius;
float disc = b * b - 4 * a * c;
// If the discriminant is less than 0, there is no solution and thus no
// intersection
if (disc < 0.0) {
intersect.t = -1.0;
return intersect;
}
disc = sqrtf(disc);
// Choose the smaller solution for t
float t = (-b - disc) / (2 * a);
// If we're using this while we're refracting we're actually trying to go
// across the sphere to the other side, so we want the larger solution for t
if (refracting)
t += disc / a;
// If t is less than 0, then sphere is behind the camera (or the camera is
// inside the sphere, in which case we just don't render it)
if (t < 0.0)
t = -1.0;
// Store the intersection point and the normal vector at it
intersect.t = t;
if (t != -1.0) {
intersect.sphere = 1;
intersect.object = sphere;
for (int i = 0; i < 3; i++)
intersect.position[i] = av[i] * t + bv[i];
float normal[3];
for (int i = 0; i < 3; i++)
normal[i] = intersect.position[i] - sphere->loc[i];
normalize(normal);
for (int i = 0; i < 3; i++)
intersect.normal[i] = normal[i];
}
return intersect;
}
/**
* This function calculates the intersection of a ray 'bv' shot from source 'av'
* with a given plane.
*/
__device__
Intersect get_plane_intersection(float *av, float *bv, Plane *plane) {
// Given a plane with a normal vector N and containing a point Q, and a ray
// equation B + tA, the value of t at which the ray intersects the plane is
// given as t = (N dot (Q - B)) / (N dot A)
Intersect intersect;
float rel[3];
// Calculate Q - B
for (int i = 0; i < 3; i++)
rel[i] = plane->origin[i] - bv[i];
float normal[3];
float *u = plane->u;
float *v = plane->v;
// Calculate the plane's normal vector as u cross v
normal[0] = u[1] * v[2] - u[2] * v[1];
normal[1] = u[2] * v[0] - u[0] * v[2];
normal[2] = u[0] * v[1] - u[1] * v[0];
normalize(normal);
// If the normal is perpendicular to the ray vector, they don't intersect
float t = dotf(normal, av);
if (t != 0.0) {
t = dotf(normal, rel) / t;
// If t < 0 then the plane is behind the ray's origin
if (t < 0.0)
t = -1.0;
}
else
t = -1.0;
intersect.t = t;
if (t != -1.0) {
// Calculate the point on the plane that the ray intersects
float position[3];
for (int i = 0; i < 3; i++)
position[i] = av[i] * t + bv[i];
// Get its position relative to the plane's origin
for (int i = 0; i < 3; i++)
rel[i] = position[i] - plane->origin[i];
// Project this vector onto the plane's u vector and make sure that it
// falls within the plane's u-wise bounds
float p = dotf(rel, plane->u);
if (p < plane->u_min || p > plane->u_max) {
intersect.t = -1.0;
return intersect;
}
// Project this vector onto the plane's v vector and make sure that it
// falls within the plane's v-wise bounds
p = dotf(rel, plane->v);
if (p < plane->v_min || p > plane->v_max) {
intersect.t = -1.0;
return intersect;
}
// Mark this intersection as a plane and fill in its values
intersect.sphere = 0;
intersect.object = plane;
for (int i = 0; i < 3; i++) {
intersect.position[i] = position[i];
intersect.normal[i] = normal[i];
}
}
return intersect;
}
/**
* This function gets the intersection of a ray 'av' with an object surface
* nearest to the ray's origin, 'bv'.
*/
__device__
Intersect get_nearest_intersection(float *av, float *bv, void *start,
Sphere *spheres, Plane *planes,
int sphere_count, int plane_count) {
// Loop through every object in the scene and test it for intersection
Intersect nearest, temp;
nearest.t = -1.0;
void *object;
// Check all of the spheres
for (int i = 0; i < sphere_count; i++) {
object = (void *) (spheres + i);
// Skip over the object the ray started on, if this is a child ray
if (object == start)
continue;
// If the ray intersects the sphere closer than any of the other objects
// we've tested so far, make this the new intersection point
temp = get_sphere_intersection(av, bv, (Sphere *) object, 0);
if (temp.t != -1.0 && (temp.t < nearest.t || nearest.t == -1)) {
nearest = temp;
nearest.object = object;
}
}
// Then check all of the planes
for (int i = 0; i < plane_count; i++) {
object = (void *) (planes + i);
// Skip over the object the ray started on, if this is a child ray
if (object == start)
continue;
// If the ray intersects the plane closer than any of the other objects
// we've tested so far, make this the new intersection point
temp = get_plane_intersection(av, bv, (Plane *) object);
if (temp.t != -1.0 && (temp.t < nearest.t || nearest.t == -1)) {
nearest = temp;
nearest.object = object;
}
}
return nearest;
}
/**
* This function calculates the shading of a point using the Phong lighting
* model, taking into account the fact that some light sources are blocked by
* objects, casting shadows.
*/
__device__
void get_shadows(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int plane_count, int light_count,
Intersect intersect, float *parent, float *pixel) {
// Loop through all the lights, send a ray at each one, and test if it hits
// an object along the way
Intersect blocked;
float outgoing[3];
Light *light;
void *object;
for (int i = 0; i < light_count; i++) {
light = lights + i;
blocked.t = -1.0;
// Loop through all the spheres to see if the ray hits one
for (int j = 0; j < sphere_count; j++) {
object = (void *) (spheres + j);
// Skip over the object the point we're shading is on
if (object == intersect.object)
continue;
for (int k = 0; k < 3; k++)
outgoing[k] = light->position[k] - intersect.position[k];
blocked = get_sphere_intersection(outgoing, intersect.position,
(Sphere *) object, 0);
if (blocked.t != -1.0)
break;
}
// If it didn't hit any spheres, check the planes
if (blocked.t == -1.0) {
for (int j = 0; j < plane_count; j++) {
object = (void *) (planes + j);
// Skip over the object the point we're shading is on
if (object == intersect.object)
continue;
for (int k = 0; k < 3; k++)
outgoing[k] = light->position[k] - intersect.position[k];
blocked = get_plane_intersection(outgoing, intersect.position,
(Plane *) object);
if (blocked.t != -1.0)
break;
}
}
// If the light isn't blocked, calculate its contribution to the point
if (blocked.t == -1.0) {
phong_lighting(intersect.position, intersect.normal, parent, light,
pixel);
}
}
}
/**
 * This function calculates the contribution of an incoming ray reflected off an
* object to the color of the point on its surface it first struck.
*/
__device__
void get_reflection(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int plane_count, int light_count,
Intersect intersect, float *parent, float n, float *pixel) {
// Rotate the vector from the intersection to the eye 180 degrees around the
// normal, as though it reflected off of the surface
float av[3];
for (int i = 0; i < 3; i++)
av[i] = parent[i] - intersect.position[i];
normalize(av);
float p = dotf(av, intersect.normal);
for (int i = 0; i < 3; i++)
av[i] = 2 * p * intersect.normal[i] - av[i];
// Find the next object this ray hits, and calculate its shading, and add it
// to the shading at the ray's start location
Intersect next = get_nearest_intersection(av, intersect.position,
(Sphere *) intersect.object,
spheres, planes, sphere_count,
plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count, light_count,
next, intersect.position, pixel);
}
/**
* This function calculates the contribution of an incoming ray refracted
* through an object to the color of the point on its surface it first struck.
*/
__device__
void get_refraction(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int light_count, int plane_count,
Intersect intersect, float *parent, float n, float *pixel) {
// Calculate the incoming vector
float incoming[3];
for (int i = 0; i < 3; i++)
incoming[i] = intersect.position[i] - parent[i];
normalize(incoming);
// Calculate the cosine of the incident angle and the squared cosine of the
// refracted angle
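    // (Vector form of Snell's law: with relative index n and unit normal N,
    //  cos1 = -i.N, cossq2 = 1 - n^2 * (1 - cos1^2), and the refracted ray is
    //  t = n*i + (n*cos1 - sqrt(cossq2)) * N; cossq2 < 0 signals total
    //  internal reflection.)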
float cos1 = -dotf(incoming, intersect.normal);
float cossq2 = 1 - n * n * (1 - cos1 * cos1);
// If the ray is totally reflected, don't trace it
if (cossq2 < 0) {
return;
}
// Calculate the refracted ray
for (int i = 0; i < 3; i++) {
incoming[i] = n * incoming[i]
+ (n * cos1 - sqrt(cossq2)) * intersect.normal[i];
}
    // If we're refracting through a plane, we can just get the next object the
// ray hits and be done
Intersect next;
if (!intersect.sphere) {
next = get_nearest_intersection(incoming, intersect.position,
intersect.object, spheres, planes,
sphere_count, plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count,
light_count, next, intersect.position, pixel);
return;
}
// Otherwise, we have to find its intersection with the other side of the
// sphere, refract it back through the surface, and then go on
next = get_sphere_intersection(incoming, intersect.position,
(Sphere *) intersect.object, 1);
// Repeat the process, refracting the ray with the opposite relative index
// of refraction
float np = 1 / n;
cos1 = dotf(incoming, next.normal);
cossq2 = 1 - np * np * (1 - cos1 * cos1);
if (cossq2 < 0) {
return;
}
// Follow this ray to the first object it hits, and calculate the shading
// there, adding it to the initially refracted point
for (int i = 0; i < 3; i++)
incoming[i] = np * incoming[i]
+ (np * cos1 - sqrt(cossq2)) * next.normal[i];
intersect = get_nearest_intersection(incoming, next.position,
intersect.object, spheres, planes,
sphere_count, plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count, light_count,
intersect, next.position, pixel);
}
/**
* This kernel raytraces the current scene by having each thread handle an
* individual pixel.
*/
__global__
void raytrace_kernel(float *screen, Sphere *spheres, Plane *planes,
Light *lights, int sphere_count, int plane_count,
int light_count, float *cam_pos, float *e1, float *e2,
float *e3, float Fd, float Fx, float Fy, int xres,
int yres, float n) {
// Get the x and y pixel coordinates (with 0, 0 in the lower left for
// convenience)
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < xres && y < yres) {
Intersect intersect;
intersect.t = -1.0;
// Express the ray vector in terms of our basis by shooting it from the
// camera through a point on its imaginary sensor grid
float av[3];
for (int i = 0; i < 3; i++) {
av[i] = Fd * e3[i] + (x - xres / 2) * (Fx / xres) * e1[i]
+ (y - yres / 2) * (Fy / yres) * e2[i];
}
// Trace the ray to the first surface it hits
intersect = get_nearest_intersection(av, cam_pos, NULL, spheres, planes,
sphere_count, plane_count);
// If it hits a surface, calculate its lighting, as well as a simple
// reflection and refraction of the ray (i.e. recursion depth 1)
if (intersect.t != -1.0) {
float *pixel = screen + 3 * (y * xres + x);
get_shadows(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, pixel);
get_reflection(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, n, pixel);
get_refraction(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, n, pixel);
}
}
}
/* This function calls the kernel to raytrace the current scene. */
void call_raytrace_kernel(float *screen, Sphere *spheres, Plane *planes,
Light *lights, int sphere_count, int plane_count,
int light_count, float *cam_pos, float *e1, float *e2,
float *e3, float Fd, float Fx, float Fy, int xres,
int yres, float n) {
// Have each block handle a 32 x 32 square of pixels
dim3 blocks((xres - 1) / 32 + 1, (yres - 1) / 32 + 1);
dim3 threads(32, 32);
hipLaunchKernelGGL(( raytrace_kernel), dim3(blocks), dim3(threads), 0, 0, screen, spheres, planes, lights,
sphere_count, plane_count, light_count,
cam_pos, e1, e2, e3, Fd, Fx, Fy, xres,
yres, n);
}
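/*
 * Hedged usage sketch, not part of the original source: one way a host program
 * might drive call_raytrace_kernel.  It assumes the caller has already filled
 * host-side Sphere/Plane/Light arrays, the camera position, and the basis
 * vectors (all names below are illustrative); error checking is omitted.
 */
void render_scene_example(float *h_screen, Sphere *h_spheres, Plane *h_planes,
                          Light *h_lights, int sphere_count, int plane_count,
                          int light_count, float *h_cam_pos, float *h_e1,
                          float *h_e2, float *h_e3, float Fd, float Fx,
                          float Fy, int xres, int yres, float n) {
    size_t screen_len = 3 * (size_t) xres * yres * sizeof(float);
    float *d_screen, *d_cam_pos, *d_e1, *d_e2, *d_e3;
    Sphere *d_spheres;
    Plane *d_planes;
    Light *d_lights;
    hipMalloc((void **) &d_screen, screen_len);
    hipMemset(d_screen, 0, screen_len);   // the kernel accumulates into the screen buffer
    hipMalloc((void **) &d_spheres, sphere_count * sizeof(Sphere));
    hipMalloc((void **) &d_planes, plane_count * sizeof(Plane));
    hipMalloc((void **) &d_lights, light_count * sizeof(Light));
    hipMalloc((void **) &d_cam_pos, 3 * sizeof(float));
    hipMalloc((void **) &d_e1, 3 * sizeof(float));
    hipMalloc((void **) &d_e2, 3 * sizeof(float));
    hipMalloc((void **) &d_e3, 3 * sizeof(float));
    hipMemcpy(d_spheres, h_spheres, sphere_count * sizeof(Sphere), hipMemcpyHostToDevice);
    hipMemcpy(d_planes, h_planes, plane_count * sizeof(Plane), hipMemcpyHostToDevice);
    hipMemcpy(d_lights, h_lights, light_count * sizeof(Light), hipMemcpyHostToDevice);
    hipMemcpy(d_cam_pos, h_cam_pos, 3 * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_e1, h_e1, 3 * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_e2, h_e2, 3 * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_e3, h_e3, 3 * sizeof(float), hipMemcpyHostToDevice);
    call_raytrace_kernel(d_screen, d_spheres, d_planes, d_lights, sphere_count,
                         plane_count, light_count, d_cam_pos, d_e1, d_e2, d_e3,
                         Fd, Fx, Fy, xres, yres, n);
    hipDeviceSynchronize();
    hipMemcpy(h_screen, d_screen, screen_len, hipMemcpyDeviceToHost);
    hipFree(d_screen);  hipFree(d_spheres); hipFree(d_planes); hipFree(d_lights);
    hipFree(d_cam_pos); hipFree(d_e1);      hipFree(d_e2);     hipFree(d_e3);
}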
| 59eff126ab076923a576ca2847de596bd31c9204.cu | #include "raytrace_cuda.cuh"
/* Utility function to normalize a vector. */
__device__
void normalize(float *v) {
float length = sqrtf(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
for (int i = 0; i < 3; i++)
v[i] /= length;
}
/* Utility function to compute the dot product of two vectors. */
__device__
float dotf(float *a, float *b) {
float d = 0.0;
for (int i = 0; i < 3; i++)
d += a[i] * b[i];
return d;
}
/**
* This function calculates the attenuation to apply in the Phong lighting model
* given the vector difference between the light and the point to be rendered,
* and a factor k.
*/
__device__
float get_attenuation(float *d, float k) {
// If k is zero (the default), then we don't have to do any computation
return (k == 0.0) ? 1.0 : 1.0 / (1.0 + k * dotf(d, d));
}
/**
* This function calculates the color of a point due to a single light using the
* Phong lighting model, given the surface's normal at that point.
*/
__device__
void phong_lighting(float *point, float *normal, float *eye, Light *light,
float *pixel) {
float diffuse_sum[3] = {0.0, 0.0, 0.0};
float specular_sum[3] = {0.0, 0.0, 0.0};
// Get the vector from the camera to the point
float eye_rel[3];
for (int i = 0; i < 3; i++)
eye_rel[i] = eye[i] - point[i];
normalize(eye_rel);
// Get the vector from the light to the point
float light_rel[3];
for (int i = 0; i < 3; i++)
light_rel[i] = light->position[i] - point[i];
// Calculate and apply the attenuation
float light_color[3];
for (int i = 0; i < 3; i++) {
light_color[i] =
light->color[i] * get_attenuation(light_rel, light->attenuation_k);
}
normalize(light_rel);
// Calculate and add the light's contribution to the diffuse and
// specular reflection of the point
float p = fmax(0.0, dotf(normal, light_rel));
for (int i = 0; i < 3; i++)
diffuse_sum[i] += light_color[i] * p;
for (int i = 0; i < 3; i++)
eye_rel[i] += light_rel[i];
normalize(eye_rel);
p = fmax(0.0, dotf(normal, eye_rel));
for (int i = 0; i < 3; i++)
specular_sum[i] += light_color[i] * p;
// Calculate and add the overall point color intensities for red, green,
// and blue
for (int i = 0; i < 3; i++) {
pixel[i] += fmin(1, diffuse_sum[i] + specular_sum[i]);
}
}
/**
* This function calculates the intersection of a ray 'bv' shot from source 'av'
* with a given sphere.
*/
__device__
Intersect get_sphere_intersection(float *av, float *bv, Sphere *sphere,
int refracting) {
// Given a sphere with center C and radius R, and a ray equation B + tA,
// the intersection of the two can be found by solving |B + tA - C|^2 = R^2,
// which enforces that the ray be on the sphere's surface. This is a
// quadratic equation in t.
Intersect intersect;
float rel[3];
for (int i = 0; i < 3; i++)
rel[i] = bv[i] - sphere->loc[i];
float a = dotf(av, av);
float b = 2 * dotf(av, rel);
float c = dotf(rel, rel) - sphere->radius * sphere->radius;
float disc = b * b - 4 * a * c;
// If the discriminant is less than 0, there is no solution and thus no
// intersection
if (disc < 0.0) {
intersect.t = -1.0;
return intersect;
}
disc = sqrtf(disc);
// Choose the smaller solution for t
float t = (-b - disc) / (2 * a);
// If we're using this while we're refracting we're actually trying to go
// across the sphere to the other side, so we want the larger solution for t
if (refracting)
t += disc / a;
// If t is less than 0, then sphere is behind the camera (or the camera is
// inside the sphere, in which case we just don't render it)
if (t < 0.0)
t = -1.0;
// Store the intersection point and the normal vector at it
intersect.t = t;
if (t != -1.0) {
intersect.sphere = 1;
intersect.object = sphere;
for (int i = 0; i < 3; i++)
intersect.position[i] = av[i] * t + bv[i];
float normal[3];
for (int i = 0; i < 3; i++)
normal[i] = intersect.position[i] - sphere->loc[i];
normalize(normal);
for (int i = 0; i < 3; i++)
intersect.normal[i] = normal[i];
}
return intersect;
}
/**
* This function calculates the intersection of a ray 'bv' shot from source 'av'
* with a given plane.
*/
__device__
Intersect get_plane_intersection(float *av, float *bv, Plane *plane) {
// Given a plane with a normal vector N and containing a point Q, and a ray
// equation B + tA, the value of t at which the ray intersects the plane is
// given as t = (N dot (Q - B)) / (N dot A)
Intersect intersect;
float rel[3];
// Calculate Q - B
for (int i = 0; i < 3; i++)
rel[i] = plane->origin[i] - bv[i];
float normal[3];
float *u = plane->u;
float *v = plane->v;
// Calculate the plane's normal vector as u cross v
normal[0] = u[1] * v[2] - u[2] * v[1];
normal[1] = u[2] * v[0] - u[0] * v[2];
normal[2] = u[0] * v[1] - u[1] * v[0];
normalize(normal);
// If the normal is perpendicular to the ray vector, they don't intersect
float t = dotf(normal, av);
if (t != 0.0) {
t = dotf(normal, rel) / t;
// If t < 0 then the plane is behind the ray's origin
if (t < 0.0)
t = -1.0;
}
else
t = -1.0;
intersect.t = t;
if (t != -1.0) {
// Calculate the point on the plane that the ray intersects
float position[3];
for (int i = 0; i < 3; i++)
position[i] = av[i] * t + bv[i];
// Get its position relative to the plane's origin
for (int i = 0; i < 3; i++)
rel[i] = position[i] - plane->origin[i];
// Project this vector onto the plane's u vector and make sure that it
// falls within the plane's u-wise bounds
float p = dotf(rel, plane->u);
if (p < plane->u_min || p > plane->u_max) {
intersect.t = -1.0;
return intersect;
}
// Project this vector onto the plane's v vector and make sure that it
// falls within the plane's v-wise bounds
p = dotf(rel, plane->v);
if (p < plane->v_min || p > plane->v_max) {
intersect.t = -1.0;
return intersect;
}
// Mark this intersection as a plane and fill in its values
intersect.sphere = 0;
intersect.object = plane;
for (int i = 0; i < 3; i++) {
intersect.position[i] = position[i];
intersect.normal[i] = normal[i];
}
}
return intersect;
}
/**
* This function gets the intersection of a ray 'av' with an object surface
* nearest to the ray's origin, 'bv'.
*/
__device__
Intersect get_nearest_intersection(float *av, float *bv, void *start,
Sphere *spheres, Plane *planes,
int sphere_count, int plane_count) {
// Loop through every object in the scene and test it for intersection
Intersect nearest, temp;
nearest.t = -1.0;
void *object;
// Check all of the spheres
for (int i = 0; i < sphere_count; i++) {
object = (void *) (spheres + i);
// Skip over the object the ray started on, if this is a child ray
if (object == start)
continue;
// If the ray intersects the sphere closer than any of the other objects
// we've tested so far, make this the new intersection point
temp = get_sphere_intersection(av, bv, (Sphere *) object, 0);
if (temp.t != -1.0 && (temp.t < nearest.t || nearest.t == -1)) {
nearest = temp;
nearest.object = object;
}
}
// Then check all of the planes
for (int i = 0; i < plane_count; i++) {
object = (void *) (planes + i);
// Skip over the object the ray started on, if this is a child ray
if (object == start)
continue;
// If the ray intersects the plane closer than any of the other objects
// we've tested so far, make this the new intersection point
temp = get_plane_intersection(av, bv, (Plane *) object);
if (temp.t != -1.0 && (temp.t < nearest.t || nearest.t == -1)) {
nearest = temp;
nearest.object = object;
}
}
return nearest;
}
/**
* This function calculates the shading of a point using the Phong lighting
* model, taking into account the fact that some light sources are blocked by
* objects, casting shadows.
*/
__device__
void get_shadows(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int plane_count, int light_count,
Intersect intersect, float *parent, float *pixel) {
// Loop through all the lights, send a ray at each one, and test if it hits
// an object along the way
Intersect blocked;
float outgoing[3];
Light *light;
void *object;
for (int i = 0; i < light_count; i++) {
light = lights + i;
blocked.t = -1.0;
// Loop through all the spheres to see if the ray hits one
for (int j = 0; j < sphere_count; j++) {
object = (void *) (spheres + j);
// Skip over the object the point we're shading is on
if (object == intersect.object)
continue;
for (int k = 0; k < 3; k++)
outgoing[k] = light->position[k] - intersect.position[k];
blocked = get_sphere_intersection(outgoing, intersect.position,
(Sphere *) object, 0);
if (blocked.t != -1.0)
break;
}
// If it didn't hit any spheres, check the planes
if (blocked.t == -1.0) {
for (int j = 0; j < plane_count; j++) {
object = (void *) (planes + j);
// Skip over the object the point we're shading is on
if (object == intersect.object)
continue;
for (int k = 0; k < 3; k++)
outgoing[k] = light->position[k] - intersect.position[k];
blocked = get_plane_intersection(outgoing, intersect.position,
(Plane *) object);
if (blocked.t != -1.0)
break;
}
}
// If the light isn't blocked, calculate its contribution to the point
if (blocked.t == -1.0) {
phong_lighting(intersect.position, intersect.normal, parent, light,
pixel);
}
}
}
/**
 * This function calculates the contribution of an incoming ray reflected off an
* object to the color of the point on its surface it first struck.
*/
__device__
void get_reflection(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int plane_count, int light_count,
Intersect intersect, float *parent, float n, float *pixel) {
// Rotate the vector from the intersection to the eye 180 degrees around the
// normal, as though it reflected off of the surface
float av[3];
for (int i = 0; i < 3; i++)
av[i] = parent[i] - intersect.position[i];
normalize(av);
float p = dotf(av, intersect.normal);
for (int i = 0; i < 3; i++)
av[i] = 2 * p * intersect.normal[i] - av[i];
// Find the next object this ray hits, and calculate its shading, and add it
// to the shading at the ray's start location
Intersect next = get_nearest_intersection(av, intersect.position,
(Sphere *) intersect.object,
spheres, planes, sphere_count,
plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count, light_count,
next, intersect.position, pixel);
}
/**
* This function calculates the contribution of an incoming ray refracted
* through an object to the color of the point on its surface it first struck.
*/
__device__
void get_refraction(Sphere *spheres, Plane *planes, Light *lights,
int sphere_count, int light_count, int plane_count,
Intersect intersect, float *parent, float n, float *pixel) {
// Calculate the incoming vector
float incoming[3];
for (int i = 0; i < 3; i++)
incoming[i] = intersect.position[i] - parent[i];
normalize(incoming);
// Calculate the cosine of the incident angle and the squared cosine of the
// refracted angle
float cos1 = -dotf(incoming, intersect.normal);
float cossq2 = 1 - n * n * (1 - cos1 * cos1);
// If the ray is totally reflected, don't trace it
if (cossq2 < 0) {
return;
}
// Calculate the refracted ray
for (int i = 0; i < 3; i++) {
incoming[i] = n * incoming[i]
+ (n * cos1 - sqrt(cossq2)) * intersect.normal[i];
}
    // If we're refracting through a plane, we can just get the next object the
// ray hits and be done
Intersect next;
if (!intersect.sphere) {
next = get_nearest_intersection(incoming, intersect.position,
intersect.object, spheres, planes,
sphere_count, plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count,
light_count, next, intersect.position, pixel);
return;
}
// Otherwise, we have to find its intersection with the other side of the
// sphere, refract it back through the surface, and then go on
next = get_sphere_intersection(incoming, intersect.position,
(Sphere *) intersect.object, 1);
// Repeat the process, refracting the ray with the opposite relative index
// of refraction
float np = 1 / n;
cos1 = dotf(incoming, next.normal);
cossq2 = 1 - np * np * (1 - cos1 * cos1);
if (cossq2 < 0) {
return;
}
// Follow this ray to the first object it hits, and calculate the shading
// there, adding it to the initially refracted point
for (int i = 0; i < 3; i++)
incoming[i] = np * incoming[i]
+ (np * cos1 - sqrt(cossq2)) * next.normal[i];
intersect = get_nearest_intersection(incoming, next.position,
intersect.object, spheres, planes,
sphere_count, plane_count);
get_shadows(spheres, planes, lights, sphere_count, plane_count, light_count,
intersect, next.position, pixel);
}
/**
* This kernel raytraces the current scene by having each thread handle an
* individual pixel.
*/
__global__
void raytrace_kernel(float *screen, Sphere *spheres, Plane *planes,
Light *lights, int sphere_count, int plane_count,
int light_count, float *cam_pos, float *e1, float *e2,
float *e3, float Fd, float Fx, float Fy, int xres,
int yres, float n) {
// Get the x and y pixel coordinates (with 0, 0 in the lower left for
// convenience)
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < xres && y < yres) {
Intersect intersect;
intersect.t = -1.0;
// Express the ray vector in terms of our basis by shooting it from the
// camera through a point on its imaginary sensor grid
float av[3];
for (int i = 0; i < 3; i++) {
av[i] = Fd * e3[i] + (x - xres / 2) * (Fx / xres) * e1[i]
+ (y - yres / 2) * (Fy / yres) * e2[i];
}
// Trace the ray to the first surface it hits
intersect = get_nearest_intersection(av, cam_pos, NULL, spheres, planes,
sphere_count, plane_count);
// If it hits a surface, calculate its lighting, as well as a simple
// reflection and refraction of the ray (i.e. recursion depth 1)
if (intersect.t != -1.0) {
float *pixel = screen + 3 * (y * xres + x);
get_shadows(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, pixel);
get_reflection(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, n, pixel);
get_refraction(spheres, planes, lights, sphere_count, plane_count,
light_count, intersect, cam_pos, n, pixel);
}
}
}
/* This function calls the kernel to raytrace the current scene. */
void call_raytrace_kernel(float *screen, Sphere *spheres, Plane *planes,
Light *lights, int sphere_count, int plane_count,
int light_count, float *cam_pos, float *e1, float *e2,
float *e3, float Fd, float Fx, float Fy, int xres,
int yres, float n) {
// Have each block handle a 32 x 32 square of pixels
dim3 blocks((xres - 1) / 32 + 1, (yres - 1) / 32 + 1);
dim3 threads(32, 32);
raytrace_kernel<<<blocks, threads>>>(screen, spheres, planes, lights,
sphere_count, plane_count, light_count,
cam_pos, e1, e2, e3, Fd, Fx, Fy, xres,
yres, n);
}
|
3e6ae4cc2cc54af0bfd80807f6848a77f42c73e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void gpuActualiza(float *layer, int posicion, float energia,int size) {
float umbral = 0.001;
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid < size) {
int distancia = posicion - gid;
if ( distancia < 0 ) distancia = - distancia;
distancia = distancia + 1;
float atenuacion = sqrtf( (float)distancia );
float energia_k = energia / atenuacion;
if ( energia_k >= umbral || energia_k <= -umbral )
layer[gid] = layer[gid] + energia_k;
}
    // make sure the layer has been fully updated before the maxima are computed
    // (note: __syncthreads() only synchronizes threads within a block)
__syncthreads();
}
__global__ void gpuRelajacion(float *layer, float *layer_copy, int layer_size) {
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid>0 && gid < layer_size-1){
layer[gid] = ( layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1] ) / 3;
}
}
__global__ void gpuCopia(float *layer, float *layer_copy,int size) {
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid < size){
layer_copy[gid]=layer[gid];
}
}
| 3e6ae4cc2cc54af0bfd80807f6848a77f42c73e0.cu | __global__ void gpuActualiza(float *layer, int posicion, float energia,int size) {
float umbral = 0.001;
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid < size) {
int distancia = posicion - gid;
if ( distancia < 0 ) distancia = - distancia;
distancia = distancia + 1;
float atenuacion = sqrtf( (float)distancia );
float energia_k = energia / atenuacion;
if ( energia_k >= umbral || energia_k <= -umbral )
layer[gid] = layer[gid] + energia_k;
}
    // make sure the layer has been fully updated before the maxima are computed
    // (note: __syncthreads() only synchronizes threads within a block)
__syncthreads();
}
__global__ void gpuRelajacion(float *layer, float *layer_copy, int layer_size) {
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid>0 && gid < layer_size-1){
layer[gid] = ( layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1] ) / 3;
}
}
__global__ void gpuCopia(float *layer, float *layer_copy,int size) {
    int gid = ( blockIdx.x + gridDim.x * blockIdx.y ) * ( blockDim.x * blockDim.y ) + ( threadIdx.x + blockDim.x * threadIdx.y );
if(gid < size){
layer_copy[gid]=layer[gid];
}
}
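/* Hedged launch sketch, not in the original file: the flattened-index pattern
 * above expects a 2D grid of 2D blocks.  A host launcher consistent with it
 * could look like this (function name and block shape are illustrative only;
 * d_layer is assumed to already live on the device). */
void launch_actualiza_example(float *d_layer, int posicion, float energia, int size) {
    dim3 threads(16, 16);                       // 256 threads per block
    int num_blocks = (size + 255) / 256;        // ceil(size / 256)
    dim3 blocks(num_blocks, 1);
    gpuActualiza<<<blocks, threads>>>(d_layer, posicion, energia, size);
    cudaDeviceSynchronize();
}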
|
8fad01aad3810ebdc05310f08ff8fa78a15c6c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathMagma.hip"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(hipMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, hipMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
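// Returns a column-major (stride 1 along dim 0) copy of `src` for MAGMA, or
// `src` itself (retained) when it already has that layout; `self` is reused
// as the destination when possible.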
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int64_t n = THTensor_sizeLegacyNoScalars(a, 0);
int64_t lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
if (n > 0) {
// eigen values and workspace
scalar_t *w = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wA = th_magma_malloc_pinned<scalar_t>(lda * n);
// compute optimal size of work array
int info;
scalar_t lwork;
int liwork;
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
scalar_t *work = th_magma_malloc_pinned<scalar_t>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
}
if (jobzs[0] == 'N') {
// If eigenvector is not needed, fill the result with zeros.
THCTensor_(zero)(state, rv_);
THCTensor_(free)(state, input);
} else {
THCTensor_(freeCopyTo)(state, input, rv_);
}
#else
THError(NoMagma(syev));
#endif
}
void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size(0);
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *vr_data = NULL;
int64_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<scalar_t>(n * n);
ldvr = n;
}
scalar_t *work_data = nullptr;
if (n > 0) {
int info;
scalar_t wkopt;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
work_data = th_magma_malloc_pinned<scalar_t>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
}
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
if (n > 0) {
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), hipMemcpyHostToDevice));
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), hipMemcpyHostToDevice));
}
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
void THCTensor_(gesdd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a,
const char *some, const char* compute_uv)
{
#ifdef USE_MAGMA
THCTensor *ra_ = THCTensor_(new)(state);
THCTensor_(gesdd2)(state, ru_, rs_, rv_, ra_, a, some, compute_uv);
THCTensor_(free)(state, ra_);
#else
THError(NoMagma(gesdd));
#endif
}
void THCTensor_(gesdd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a,
const char *some, const char* compute_uv)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
char jobus = compute_uv[0] == 'N' ? 'N' : some[0];
magma_vec_t jobz = jobus == 'A' ? MagmaAllVec : jobus == 'S' ? MagmaSomeVec : jobus == 'O' ? MagmaOverwriteVec : MagmaNoVec;
int iunused[1];
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = m < n ? m : n;
int64_t j = (jobz == MagmaAllVec) ? m : k;
int64_t jv = (jobz == MagmaAllVec) ? n : k;
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
scalar_t *rs_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *ru_data = NULL;
scalar_t *rv_data = NULL;
if (jobz != MagmaNoVec) {
ru_data = th_magma_malloc_pinned<scalar_t>(m * j);
rv_data = th_magma_malloc_pinned<scalar_t>(n * n);
}
scalar_t wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif
int lwork = (int) wkopt;
scalar_t *work_data = th_magma_malloc_pinned<scalar_t>(lwork);
int *iwork = th_magma_malloc_pinned<int>(8 * k);
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif
if (info > 0)
THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
else if (info < 0)
THError("MAGMA gesdd : Argument %d : illegal value", -info);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
if (jobz != MagmaNoVec) {
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
if (jobz != MagmaAllVec)
THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
} else {
THCTensor_(resize2d)(state, rv_, n, n);
THCTensor_(zero)(state, rv_);
THCTensor_(resize2d)(state, ru_, m, m);
THCTensor_(zero)(state, ru_);
}
magma_free_pinned(work_data);
magma_free_pinned(iwork);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesdd2));
#endif
}
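// MAGMA potri writes only the requested triangle of the inverse; these two
// kernels mirror it across the diagonal so potri below can return a full
// symmetric matrix.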
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
hipStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
} else {
hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
| 8fad01aad3810ebdc05310f08ff8fa78a15c6c60.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int64_t n = THTensor_sizeLegacyNoScalars(a, 0);
int64_t lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
if (n > 0) {
// eigen values and workspace
scalar_t *w = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wA = th_magma_malloc_pinned<scalar_t>(lda * n);
// compute optimal size of work array
int info;
scalar_t lwork;
int liwork;
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
scalar_t *work = th_magma_malloc_pinned<scalar_t>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
}
if (jobzs[0] == 'N') {
// If eigenvector is not needed, fill the result with zeros.
THCTensor_(zero)(state, rv_);
THCTensor_(free)(state, input);
} else {
THCTensor_(freeCopyTo)(state, input, rv_);
}
#else
THError(NoMagma(syev));
#endif
}
void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size(0);
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *vr_data = NULL;
int64_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<scalar_t>(n * n);
ldvr = n;
}
scalar_t *work_data = nullptr;
if (n > 0) {
int info;
scalar_t wkopt;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
work_data = th_magma_malloc_pinned<scalar_t>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
}
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
if (n > 0) {
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), cudaMemcpyHostToDevice));
}
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
void THCTensor_(gesdd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a,
const char *some, const char* compute_uv)
{
#ifdef USE_MAGMA
THCTensor *ra_ = THCTensor_(new)(state);
THCTensor_(gesdd2)(state, ru_, rs_, rv_, ra_, a, some, compute_uv);
THCTensor_(free)(state, ra_);
#else
THError(NoMagma(gesdd));
#endif
}
void THCTensor_(gesdd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a,
const char *some, const char* compute_uv)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
char jobus = compute_uv[0] == 'N' ? 'N' : some[0];
magma_vec_t jobz = jobus == 'A' ? MagmaAllVec : jobus == 'S' ? MagmaSomeVec : jobus == 'O' ? MagmaOverwriteVec : MagmaNoVec;
int iunused[1];
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = m < n ? m : n;
int64_t j = (jobz == MagmaAllVec) ? m : k;
int64_t jv = (jobz == MagmaAllVec) ? n : k;
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
scalar_t *rs_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *ru_data = NULL;
scalar_t *rv_data = NULL;
if (jobz != MagmaNoVec) {
ru_data = th_magma_malloc_pinned<scalar_t>(m * j);
rv_data = th_magma_malloc_pinned<scalar_t>(n * n);
}
scalar_t wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif
int lwork = (int) wkopt;
scalar_t *work_data = th_magma_malloc_pinned<scalar_t>(lwork);
int *iwork = th_magma_malloc_pinned<int>(8 * k);
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif
if (info > 0)
THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
else if (info < 0)
THError("MAGMA gesdd : Argument %d : illegal value", -info);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
if (jobz != MagmaNoVec) {
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
if (jobz != MagmaAllVec)
THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
} else {
THCTensor_(resize2d)(state, rv_, n, n);
THCTensor_(zero)(state, rv_);
THCTensor_(resize2d)(state, ru_, m, m);
THCTensor_(zero)(state, ru_);
}
magma_free_pinned(work_data);
magma_free_pinned(iwork);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesdd2));
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
|
b3d430ca9fc19fe54201b516e7d82054c128f93d.hip | // !!! This is a file automatically generated by hipify!!!
//
// ServerKernels.cu
//
//
#include "Profile.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#include <iostream>
#include <assert.h>
#include <vector>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
// ==================================================================
// CudaEventTimer
//
// Example usage:
//
// T.Begin();
// generate <<< blocks, threads >>> (gData, DataPerBlock);
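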
// T.End();
// hipError_t err = hipGetLastError();
// printf("\nError = %s", hipGetErrorString(err));
// printf("\nDuration of generate kernel = %.3f ms for %d floats\n\n", T.GetTime(), N);
// ==================================================================
class CudaEventTimer
{
public:
CudaEventTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~CudaEventTimer() { hipEventDestroy(start); hipEventDestroy(stop); }
void Begin() { hipEventRecord(start); }
void End() {
hipEventRecord(stop);
hipEventSynchronize(stop);
}
float GetTime(void) { hipEventElapsedTime(&ms, start, stop); return ms; }
private:
hipEvent_t start, stop;
float ms;
};
// ==================================================================
// ==================================================================
using namespace std;
/* BIG = 1/MACHEPF */
#define BIG 16777216.0f
#define MACHEPF 5.9604644775390625E-8f
/* MAXNUMF = 2^128 * (1 - 2^-24) */
#define MAXNUMF 3.4028234663852885981170418348451692544e38f
/* log(2^-149) */
#define MINLOGF -103.278929903431851103f
#define PIF 3.141592653589793238f
#define PIINV 0.318309886183790671538f
/* log( sqrt( 2*pi ) ) */
#define LS2PI 0.91893853320467274178f
#define MAXLGM 2.035093e36
#define MAXLOGF 88.72283905206835f
// sqrt(2pi)
#define s2pi 2.50662827463100050242f
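//
// The polevlf*/p1evlf* helpers below are hand-unrolled Horner evaluations of
// fixed-coefficient polynomials; together with lgamf, incbetf, igamf and
// ndtrif they appear to be adapted from the single-precision Cephes special
// function library (hence the commented-out mtherr() error calls).
//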
/* log gamma(x+2), -.5 < x < .5 */
__host__ __device__ float polevlfB(float xx)
{
float t = 6.055172732649237E-004f;
t = t * xx - 1.311620815545743E-003f;
t = t * xx + 2.863437556468661E-003f;
t = t * xx - 7.366775108654962E-003f;
t = t * xx + 2.058355474821512E-002f;
t = t * xx - 6.735323259371034E-002f;
t = t * xx + 3.224669577325661E-001f;
t = t * xx + 4.227843421859038E-001f;
return t;
}
/* log gamma(x+1), -.25 < x < .25 */
__host__ __device__ float polevlfC(float xx)
{
float t = 1.369488127325832E-001f;
t = t * xx - 1.590086327657347E-001f;
t = t * xx + 1.692415923504637E-001f;
t = t * xx - 2.067882815621965E-001f;
t = t * xx + 2.705806208275915E-001f;
t = t * xx - 4.006931650563372E-001f;
t = t * xx + 8.224670749082976E-001f;
t = t * xx - 5.772156501719101E-001f;
return t;
}
/* approximation for 0 <= |y - 0.5| <= 3/8 */
__host__ __device__ float polevlfP0(float xx)
{
float t = -5.99633501014107895267E1f;
t = t * xx + 9.80010754185999661536E1f;
t = t * xx - 5.66762857469070293439E1f;
t = t * xx + 1.39312609387279679503E1f;
t = t * xx - 1.23916583867381258016E0f;
return t;
}
__host__ __device__ float p1evlfQ0(float xx)
{
float t = xx + 1.95448858338141759834E0f;
t = t * xx + 4.67627912898881538453E0f;
t = t * xx + 8.63602421390890590575E1f;
t = t * xx - 2.25462687854119370527E2f;
t = t * xx + 2.00260212380060660359E2f;
t = t * xx - 8.20372256168333339912E1f;
t = t * xx + 1.59056225126211695515E1f;
t = t * xx - 1.18331621121330003142E0f;
return t;
}
/* Approximation for interval z = sqrt(-2 log y ) between 2 and 8
* i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14.
*/
__host__ __device__ float polevlfP1(float xx)
{
float t = 4.05544892305962419923E0f;
t = t * xx + 3.15251094599893866154E1f;
t = t * xx + 5.71628192246421288162E1f;
t = t * xx + 4.40805073893200834700E1f;
t = t * xx + 1.46849561928858024014E1f;
t = t * xx + 2.18663306850790267539E0f;
t = t * xx - 1.40256079171354495875E-1f;
t = t * xx - 3.50424626827848203418E-2f;
t = t * xx - 8.57456785154685413611E-4f;
return t;
}
__host__ __device__ float p1evlfQ1(float xx)
{
float t = xx + 1.57799883256466749731E1f;
t = t * xx + 4.53907635128879210584E1f;
t = t * xx + 4.13172038254672030440E1f;
t = t * xx + 1.50425385692907503408E1f;
t = t * xx + 2.50464946208309415979E0f;
t = t * xx - 1.42182922854787788574E-1f;
t = t * xx - 3.80806407691578277194E-2f;
t = t * xx - 9.33259480895457427372E-4f;
return t;
}
/* Approximation for interval z = sqrt(-2 log y ) between 8 and 64
* i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
*/
__host__ __device__ float polevlfP2(float xx)
{
float t = 3.23774891776946035970E0f;
t = t * xx + 6.91522889068984211695E0f;
t = t * xx + 3.93881025292474443415E0f;
t = t * xx + 1.33303460815807542389E0f;
t = t * xx + 2.01485389549179081538E-1f;
t = t * xx + 1.23716634817820021358E-2f;
t = t * xx + 3.01581553508235416007E-4f;
t = t * xx + 2.65806974686737550832E-6f;
t = t * xx + 6.23974539184983293730E-9f;
return t;
}
__host__ __device__ float p1evlfQ2(float xx)
{
float t = xx + 6.02427039364742014255E0f;
t = t * xx + 3.67983563856160859403E0f;
t = t * xx + 1.37702099489081330271E0f;
t = t * xx + 2.16236993594496635890E-1f;
t = t * xx + 1.34204006088543189037E-2f;
t = t * xx + 3.28014464682127739104E-4f;
t = t * xx + 2.89247864745380683936E-6f;
t = t * xx + 6.79019408009981274425E-9f;
return t;
}
__host__ __device__ float lgamf(float xx)
{
float p, q, w, z, x;
float nx, tx;
int i, direction;
int sgngamf = 1;
x = xx;
if (x < 0.0f)
{
q = -x;
w = lgamf(q); /* note this modifies sgngam! */
p = floorf(q);
if (p == q)
goto loverf;
i = (int)p;
if ((i & 1) == 0)
sgngamf = -1;
else
sgngamf = 1;
z = q - p;
if (z > 0.5f)
{
p += 1.0f;
z = p - q;
}
z = q * sinf(PIF * z);
if (z == 0.0)
goto loverf;
z = -logf(PIINV*z) - w;
return(z);
}
if (x < 6.5f)
{
direction = 0;
z = 1.0;
tx = x;
nx = 0.0;
if (x >= 1.5)
{
while (tx > 2.5f)
{
nx -= 1.0f;
tx = x + nx;
z *= tx;
}
x += nx - 2.0f;
iv1r5:
p = x * polevlfB(x);
goto cont;
}
if (x >= 1.25f)
{
z *= x;
x -= 1.0f; /* x + 1 - 2 */
direction = 1;
goto iv1r5;
}
if (x >= 0.75f)
{
x -= 1.0f;
p = x * polevlfC(x);
q = 0.0f;
goto contz;
}
while (tx < 1.5f)
{
if (tx == 0.0f)
goto loverf;
z *= tx;
nx += 1.0f;
tx = x + nx;
}
direction = 1;
x += nx - 2.0f;
p = x * polevlfB(x);
cont:
if (z < 0.0f)
{
sgngamf = -1;
z = -z;
}
else
{
sgngamf = 1;
}
q = logf(z);
if (direction)
q = -q;
contz:
return(p + q);
}
if (x > MAXLGM)
{
loverf:
return(sgngamf * MAXNUMF); // overflow
}
// Note, though an asymptotic formula could be used for x >= 3,
// there is cancellation error in the following if x < 6.5.
q = LS2PI - x;
q += (x - 0.5f) * logf(x);
if (x <= 1.0e4)
{
z = 1.0f / x;
p = z * z;
q += ((6.789774945028216E-004f * p
- 2.769887652139868E-003f) * p
+ 8.333316229807355E-002f) * z;
}
return(q);
}
//
// Continued fraction expansion #1 for incomplete beta integral.
//
__host__ __device__ float incbcff(float aa, float bb, float xx)
{
float a, b, x, xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
float k1, k2, k3, k4, k5, k6, k7, k8;
float r, t, ans;
int n;
a = aa;
b = bb;
x = xx;
k1 = a;
k2 = a + b;
k3 = a;
k4 = a + 1.0f;
k5 = 1.0f;
k6 = b - 1.0f;
k7 = k4;
k8 = a + 2.0f;
pkm2 = 0.0f;
qkm2 = 1.0f;
pkm1 = 1.0f;
qkm1 = 1.0f;
ans = 1.0f;
r = 0.0f;
n = 0;
do
{
xk = -(x * k1 * k2) / (k3 * k4);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
xk = (x * k5 * k6) / (k7 * k8);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (qk != 0)
r = pk / qk;
if (r != 0)
{
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
if (t < MACHEPF) return (ans);
k1 += 1.0f;
k2 += 1.0f;
k3 += 2.0f;
k4 += 2.0f;
k5 += 1.0f;
k6 -= 1.0f;
k7 += 2.0f;
k8 += 2.0f;
if ((fabsf(qk) + fabsf(pk)) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
if ((fabsf(qk) < MACHEPF) || (fabsf(pk) < MACHEPF))
{
pkm2 *= BIG;
pkm1 *= BIG;
qkm2 *= BIG;
qkm1 *= BIG;
}
} while (++n < 100);
return(ans);
}
//
// Continued fraction expansion #2 for incomplete beta integral.
//
__host__ __device__ float incbdf(float aa, float bb, float xx)
{
float a, b, x, xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
float k1, k2, k3, k4, k5, k6, k7, k8;
float r, t, ans, z;
int n;
a = aa;
b = bb;
x = xx;
k1 = a;
k2 = b - 1.0f;
k3 = a;
k4 = a + 1.0f;
k5 = 1.0f;
k6 = a + b;
k7 = a + 1.0f;
k8 = a + 2.0f;
pkm2 = 0.0f;
qkm2 = 1.0f;
pkm1 = 1.0f;
qkm1 = 1.0f;
z = x / (1.0f - x);
ans = 1.0f;
r = 0.0f;
n = 0;
do
{
xk = -(z * k1 * k2) / (k3 * k4);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
xk = (z * k5 * k6) / (k7 * k8);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (qk != 0)
r = pk / qk;
if (r != 0)
{
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
if (t < MACHEPF) return ans; // underflow
k1 += 1.0f;
k2 -= 1.0f;
k3 += 2.0f;
k4 += 2.0f;
k5 += 1.0f;
k6 += 1.0f;
k7 += 2.0f;
k8 += 2.0f;
if ((fabsf(qk) + fabsf(pk)) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
if ((fabsf(qk) < MACHEPF) || (fabsf(pk) < MACHEPF))
{
pkm2 *= BIG;
pkm1 *= BIG;
qkm2 *= BIG;
qkm1 *= BIG;
}
} while (++n < 100);
return(ans);
}
__host__ __device__ float incbpsf(float aa, float bb, float xx)
{
float a, b, x, t, u, y, s;
a = aa;
b = bb;
x = xx;
y = a * logf(x) + (b - 1.0f)*logf(1.0f - x) - logf(a);
y -= lgamf(a) + lgamf(b);
y += lgamf(a + b);
t = x / (1.0f - x);
s = 0.0f;
u = 1.0f;
do
{
b -= 1.0f;
if (b == 0.0f)
break;
a += 1.0f;
u *= t*b / a;
s += u;
} while (fabsf(u) > MACHEPF);
if (y < MINLOGF)
{
s = 0.0f; // underflow
}
else
s = expf(y) * (1.0f + s);
return(s);
}
__host__ __device__ float incbetf(float aa, float bb, float xx)
{
float ans, a, b, t, x, onemx;
int flag;
if ((xx <= 0.0f) || (xx >= 1.0f))
{
if (xx == 0.0f)
return(0.0f);
if (xx == 1.0f)
return(1.0f);
return(0.0f);
}
onemx = 1.0f - xx;
// Transformation for small aa.
if (aa <= 1.0f)
{
ans = incbetf(aa + 1.0f, bb, xx);
t = aa*logf(xx) + bb*logf(1.0f - xx)
+ lgamf(aa + bb) - lgamf(aa + 1.0f) - lgamf(bb);
if (t > MINLOGF)
ans += expf(t);
return(ans);
}
// see if x is greater than the mean.
if (xx > (aa / (aa + bb)))
{
flag = 1;
a = bb;
b = aa;
t = xx;
x = onemx;
}
else
{
flag = 0;
a = aa;
b = bb;
t = onemx;
x = xx;
}
// Choose expansion for optimal convergence.
if (b > 10.0f)
{
if (fabsf(b*x / a) < 0.3f)
{
t = incbpsf(a, b, x);
goto bdone;
}
}
ans = x * (a + b - 2.0f) / (a - 1.0f);
if (ans < 1.0f)
{
ans = incbcff(a, b, x);
t = b * logf(t);
}
else
{
ans = incbdf(a, b, x);
t = (b - 1.0f) * logf(t);
}
t += a*logf(x) + lgamf(a + b) - lgamf(a) - lgamf(b);
t += logf(ans / a);
if (t < MINLOGF)
{
t = 0.0f; // underflow
}
else
{
t = expf(t);
}
bdone:
if (flag)
t = 1.0f - t;
return(t);
}
__host__ __device__ float fdtrcf(float a, float b, float x)
{
float w;
if ((a < 1.0f) || (b < 1.0f) || (x < 0.0f))
{
return(0.0f);
}
w = b / (b + a * x);
return incbetf(0.5f*b, 0.5f*a, w);
}
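// The reduction above is the standard F-distribution tail identity
//   P(F > x) = I_w(b/2, a/2) with w = b / (b + a*x),
// where I is the regularized incomplete beta function (incbetf) and a, b are
// the numerator and denominator degrees of freedom.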
__host__ __device__ float ndtrif(float yy0)
{
float y0, x, y, z, y2, x0, x1;
int code;
y0 = yy0;
if (y0 <= 0.0f)
{
// mtherr("ndtrif", DOMAIN);
return(-MAXNUMF);
}
if (y0 >= 1.0f)
{
// mtherr("ndtrif", DOMAIN);
return(MAXNUMF);
}
code = 1;
y = y0;
if (y > (1.0 - 0.13533528323661269189f)) /* 0.135... = exp(-2) */
{
y = 1.0f - y;
code = 0;
}
if (y > 0.13533528323661269189f)
{
y = y - 0.5f;
y2 = y * y;
// x = y + y * (y2 * polevlf(y2, P0, 4) / p1evlf(y2, Q0, 8));
x = y + y * (y2 * polevlfP0(y2) / p1evlfQ0(y2));
x = x * s2pi;
return(x);
}
x = sqrtf(-2.0f * logf(y));
x0 = x - logf(x) / x;
z = 1.0f / x;
if (x < 8.0f) /* y > exp(-32) = 1.2664165549e-14 */
// x1 = z * polevlf(z, P1, 8) / p1evlf(z, Q1, 8);
x1 = z * polevlfP1(z) / p1evlfQ1(z);
else
// x1 = z * polevlf(z, P2, 8) / p1evlf(z, Q2, 8);
x1 = z * polevlfP2(z) / p1evlfQ2(z);
x = x0 - x1;
if (code != 0)
x = -x;
return(x);
}
__host__ __device__ float igamcf(float aa, float xx);
__host__ __device__ float igamf(float aa, float xx)
{
float a, x, ans, ax, c, r;
a = aa;
x = xx;
if ((x <= 0) || (a <= 0))
return(0.0f);
if ((x > 1.0f) && (x > a))
return(1.0f - igamcf(a, x));
/* Compute x**a * exp(-x) / gamma(a) */
ax = a * logf(x) - x - lgamf(a);
if (ax < -MAXLOGF)
{
// mtherr("igamf", UNDERFLOW);
return(0.0f);
}
ax = expf(ax);
/* power series */
r = a;
c = 1.0f;
ans = 1.0f;
do
{
r += 1.0f;
c *= x / r;
ans += c;
} while (c / ans > MACHEPF);
return(ans * ax / a);
}
__host__ __device__ float igamcf(float aa, float xx)
{
float a, x, ans, c, yc, ax, y, z;
float pk, pkm1, pkm2, qk, qkm1, qkm2;
float r, t;
// static float big = BIG;
a = aa;
x = xx;
if ((x <= 0) || (a <= 0))
return(1.0f);
if ((x < 1.0f) || (x < a))
return(1.0f - igamf(a, x));
ax = a * logf(x) - x - lgamf(a);
if (ax < -MAXLOGF)
{
// mtherr("igamcf", UNDERFLOW);
return(0.0f);
}
ax = expf(ax);
/* continued fraction */
y = 1.0f - a;
z = x + y + 1.0f;
c = 0.0;
pkm2 = 1.0f;
qkm2 = x;
pkm1 = x + 1.0f;
qkm1 = z * x;
ans = pkm1 / qkm1;
do
{
c += 1.0f;
y += 1.0f;
z += 2.0f;
yc = y * c;
pk = pkm1 * z - pkm2 * yc;
qk = qkm1 * z - qkm2 * yc;
if (qk != 0)
{
r = pk / qk;
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (fabsf(pk) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
} while (t > MACHEPF);
return(ans * ax);
}
__host__ __device__ float igamif(float aa, float yy0)
{
float a, y0, d, y, x0, lgm;
int i;
if (yy0 > 0.5f)
{
//mtherr("igamif", PLOSS);
return 0.0f;
}
a = aa;
y0 = yy0;
// approximation to inverse function
d = 1.0f / (9.0f*a);
y = (1.0f - d - ndtrif(y0) * sqrtf(d));
x0 = a * y * y * y;
lgm = lgamf(a);
for (i = 0; i<10; i++)
{
if (x0 <= 0.0f)
{
// mtherr("igamif", UNDERFLOW);
return(0.0f);
}
y = igamcf(a, x0);
/* compute the derivative of the function at this point */
d = (a - 1.0f) * logf(x0) - x0 - lgm;
if (d < -MAXLOGF)
{
// mtherr("igamif", UNDERFLOW);
goto done;
}
d = -expf(d);
/* compute the step to the next approximation of x */
if (d == 0.0)
goto done;
d = (y - y0) / d;
x0 = x0 - d;
if (i < 3)
continue;
if (fabsf(d / x0) < (2.0f * MACHEPF))
goto done;
}
done:
return(x0);
}
__host__ __device__ float chdtrcf(float dff, float xx)
{
float df, x;
df = dff;
x = xx;
if ((x < 0.0f) || (df < 1.0f))
{
// mtherr("chdtrcf", DOMAIN);
return(0.0f);
}
return(igamcf(0.5f*df, 0.5f*x));
}
__host__ __device__ float chdtrf(float dff, float xx)
{
float df, x;
df = dff;
x = xx;
if ((x < 0.0f) || (df < 1.0f))
{
// mtherr("chdtrf", DOMAIN);
return(0.0);
}
return(igamf(0.5f*df, 0.5f*x));
}
__host__ __device__ float chdtrif(float dff, float yy)
{
float y, df, x;
y = yy;
df = dff;
if ((y < 0.0f) || (y > 1.0f) || (df < 1.0f))
{
return(0.0f);
}
x = igamif(0.5f * df, y);
return(2.0f * x);
}
//
// Return the area under the F-Distribution from x to +infinity.
// This represents the p-value.
//
__host__ __device__ float CumulativeFDistributionComplimentary(float dof1, float dof2, float x)
{
return fdtrcf(dof1, dof2, x);
}
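// A minimal host-side sketch (not part of the original kernels) showing how
// the tail probability above is consumed as a p-value; the 0.05f significance
// level is an assumption chosen purely for illustration.
__host__ inline bool IsSignificantFStat(float dof1, float dof2, float F, float alpha = 0.05f)
{
// p-value = area under the F(dof1, dof2) density from F to +infinity
const float p = CumulativeFDistributionComplimentary(dof1, dof2, F);
return p < alpha;
}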
// ==================================================================
// Anova kernel
// ==================================================================
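//
// Each (voxel, SNP) cell below is a one-way ANOVA over the k = 3 genotype
// groups (SNP values 0/1/2) with n subjects:
//   F = [ sum_g n_g*(mean_g - mean)^2 / (k-1) ] / [ sum_g sum_i (x_gi - mean_g)^2 / (n-k) ]
// which is why the between-group term is divided by 2, the within-group term
// by (n - 3), and the tail probability is evaluated with dof1 = 2, dof2 = n - 3.
//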
template <int BLOCK_SIZEW, int BLOCK_SIZEH, int BLOCK_MASKW> __global__ void
AnovaKernel(
float * __restrict__ pVoxelSubject,
int * __restrict__ pSNPSubject,
float * __restrict__ pVoxelSNP,
int NumberOfSNPS,
int NumberOfSubjects,
int NumberOfVoxels)
{
// Block index
const int ddy = gridDim.y;
const int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
//
// First phase: compute the between-group variance.
//
while (by < (NumberOfVoxels+blockDim.y-1)/blockDim.y)
{
// Index of the first sub-matrix of VoxelSubject processed by the block
const int aBegin = NumberOfSubjects * BLOCK_SIZEW * by;
// Index of the last sub-matrix of VoxelSubject processed by the block
const int aEnd = aBegin + NumberOfSubjects - 1;
// Step size used to iterate through the sub-matrices of VoxelSubject
const int aStep = BLOCK_SIZEW;
// Index of the first sub-matrix of SNPSubject processed by the block
const int bBegin = NumberOfSubjects * BLOCK_SIZEH * bx;
// Step size used to iterate through the sub-matrices of SNPSubject
const int bStep = BLOCK_SIZEH;
__shared__ float s_VoxelSubject[BLOCK_SIZEH][BLOCK_SIZEW];
__shared__ int s_SNPSubject[BLOCK_SIZEH][BLOCK_SIZEW];
float n0 = 0.0f;
float n1 = 0.0f;
float n2 = 0.0f;
float sum0 = 0.0f;
float sum1 = 0.0f;
float sum2 = 0.0f;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
const int condVox = (a - aBegin + tx < NumberOfSubjects) && (by * blockDim.y + ty < NumberOfVoxels);
const int condSNP = (b - bBegin + tx < NumberOfSubjects) && (bx * blockDim.x + ty < NumberOfSNPS);
s_VoxelSubject[ty][tx] = (condVox == 1) ? pVoxelSubject[a + NumberOfSubjects * ty + tx] : 0.0f;
s_SNPSubject[ty][tx] = (condSNP == 1) ? pSNPSubject[b + NumberOfSubjects * ty + tx] : -1;
__syncthreads(); // Synchronize to make sure the matrices are loaded
#pragma unroll
for (int k = 0; k < BLOCK_SIZEW; k++)
{
//
// This access pattern guarantees no shared memory conflicts.
//
int SS = s_SNPSubject[tx][(k + tx) & BLOCK_MASKW];
float VS = s_VoxelSubject[ty][(k + tx) & BLOCK_MASKW];
const float C0 = (SS == 0); // Save the predicate result to avoid IF stmts
const float C1 = (SS == 1);
const float C2 = (SS == 2);
n0 += C0;
n1 += C1;
n2 += C2;
sum0 += C0*VS;
sum1 += C1*VS;
sum2 += C2*VS;
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of VoxelSubject and SNPSubject in the next iteration.
__syncthreads();
}
const float n = n0 + n1 + n2;
const float sum = sum0 + sum1 + sum2;
const float mean = sum / n;
const float mean0 = sum0 / fmaxf(n0, 1.0f);
const float mean1 = sum1 / fmaxf(n1, 1.0f);
const float mean2 = sum2 / fmaxf(n2, 1.0f);
const float T0 = mean0 - mean;
const float T1 = mean1 - mean;
const float T2 = mean2 - mean;
const float bg_var = (n0*T0*T0 + n1*T1*T1 + n2*T2*T2) / 2.0f;
//
// Second phase: compute the within-group variance.
//
float sumsq0 = 0.0f;
float sumsq1 = 0.0f;
float sumsq2 = 0.0f;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
const int condVox = (a - aBegin + tx < NumberOfSubjects) && (by * blockDim.y + ty < NumberOfVoxels);
const int condSNP = (b - bBegin + tx < NumberOfSubjects) && (bx * blockDim.x + ty < NumberOfSNPS);
s_VoxelSubject[ty][tx] = (condVox == 1) ? pVoxelSubject[a + NumberOfSubjects * ty + tx] : 0.0f;
s_SNPSubject[ty][tx] = (condSNP == 1) ? pSNPSubject[b + NumberOfSubjects * ty + tx] : -1;
__syncthreads(); // Synchronize to make sure the matrices are loaded
#pragma unroll
for (int k = 0; k < BLOCK_SIZEW; k++)
{
//
// This access pattern guarantees no shared memory conflicts.
//
int SS = s_SNPSubject[tx][(k + tx) & BLOCK_MASKW];
float VS = s_VoxelSubject[ty][(k + tx) & BLOCK_MASKW];
float C0 = (SS == 0); // Save the predicate result to avoid IF stmts
float C1 = (SS == 1);
float C2 = (SS == 2);
sumsq0 += C0*(VS - mean0)*(VS - mean0);
sumsq1 += C1*(VS - mean1)*(VS - mean1);
sumsq2 += C2*(VS - mean2)*(VS - mean2);
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of VoxelSubject and SNPSubject in the next iteration.
__syncthreads();
}
const int tidx = blockIdx.x * blockDim.x + tx;
const int tidy = by * blockDim.y + ty;
const float wg_var = (sumsq0 + sumsq1 + sumsq2) / (n - 3.0f);
const int c = NumberOfSNPS * BLOCK_SIZEH * by + BLOCK_SIZEW * bx;
if (tidx < NumberOfSNPS && tidy < NumberOfVoxels)
pVoxelSNP[c + NumberOfSNPS * ty + tx] = CumulativeFDistributionComplimentary(2.0f, n - 3.0f, bg_var / wg_var);
by += ddy;
}
}
// ==================================================================
// CopySubset kernel
// ==================================================================
template <typename T> __global__ void
CopySubsetKernel(
T * Src,
int * SrcList,
T * Dst,
int SrcH,
int SrcW,
int SrcListLen)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= SrcH) return;
int srow = -1;
// Zero'th thread in each warp reads the row number
if ((threadIdx.x & 0x1F) == 0)
srow = SrcList[y];
srow = __shfl(srow, 0); // all threads in the warp read from laneid 0
// If you put this IF statement before the __shfl instruction, then the right-most warps
// with fewer than 32 active threads will hang indefinitely.
if (x >= SrcW) return;
const int sidx = srow * SrcW + x;
const int didx = y * SrcW + x;
T val = Src[sidx];
Dst[didx] = val;
}
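// A minimal usage sketch (it mirrors the launches inside DoKernelAnova below):
// extract the rows listed in SrcList from a SrcH x SrcW row-major matrix Src
// into the SrcListLen x SrcW matrix Dst.
//
//   dim3 block(512, 1, 1);
//   dim3 grid((SrcW + block.x - 1) / block.x, SrcListLen, 1);
//   CopySubsetKernel<float><<<grid, block>>>(Src, SrcList, Dst, SrcH, SrcW, SrcListLen);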
// ==================================================================
// DumpRam
// ==================================================================
void DumpRam(float *dS1, float *dS2, int off, int cnt)
{
float *hS1 = new float[cnt];
float *hS2 = new float[cnt];
hipMemcpy(hS1, dS1+off, cnt * 4, hipMemcpyDeviceToHost);
hipMemcpy(hS2, dS2+off, cnt * 4, hipMemcpyDeviceToHost);
delete[] hS1;
delete[] hS2;
}
// ==================================================================
// DoKernelAnova
// ==================================================================
#define BLK_SIZEW 16
#define BLK_SIZEH 16
#define BLK_MASKW 0xF
hipError_t DoKernelAnova(
float *VoxelSubject,
int *SNPSubject,
float *VoxelSNP,
int NumberOfSNPs,
int NumberOfSubjects,
int NumberOfVoxels,
int *VoxelList,
int VoxelListCount,
int *SNPList,
int SNPListCount)
{
//
// 1. GPUMalloc VoxelSubjectTemp of size (VoxelListCount, NumberOfSubjects)
// 2. Use VoxelList to copy the subset of VoxelSubject into VoxelSubjectTemp
//
// 3. GPUMalloc SNPSubjectTemp of size (SNPListCount, NumberOfSubjects)
// 4. Use SNPList to copy the subset of SNPSubject into SNPSubjectTemp
//
// 5. call Anova with VoxelSubjectTemp, SNPSubjectTemp, VoxelListCount, SNPListCount, NumberOfSubjects
//
// 6. GPUFree VoxelSubjectTemp
// 7. GPUFree SNPSubjectTemp
hipError_t error;
// Special case ... the host wants to process all of the voxels (could be as many as 8,000,000) but
// SNPListCount == 1. This results in a VERY tall matrix that is 1 element wide. The results
// are used to show how this 1 SNP affects every voxel in the 3D brain view.
//
if (VoxelListCount == NumberOfVoxels && SNPListCount == 1)
{
int *SNPSubjectTemp;
error = hipMalloc(&SNPSubjectTemp, SNPListCount*NumberOfSubjects * sizeof(int));
if (error != hipSuccess)
{
fprintf(stderr, "hipMalloc failed on SNPSubjectTemp (error code %s)!\n", hipGetErrorString(error));
return error;
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, SNPListCount, 1);
CopySubsetKernel<int><<<grid, block>>>(SNPSubject, SNPList, SNPSubjectTemp, NumberOfSNPs, NumberOfSubjects, SNPListCount);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on SNPSubject (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(BLK_SIZEW, BLK_SIZEH, 1);
dim3 grid = dim3((SNPListCount + BLK_SIZEW - 1) / BLK_SIZEW, 16383, 1);
AnovaKernel<BLK_SIZEW, BLK_SIZEH, BLK_MASKW><<<grid, block>>>(VoxelSubject,
SNPSubjectTemp,
VoxelSNP,
SNPListCount,
NumberOfSubjects,
VoxelListCount
);
}
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "Failed to launch (VoxelListCount == NumberOfVoxels && SNPListCount == 1) (error code %s)!\n", hipGetErrorString(error));
}
hipFree(SNPSubjectTemp);
}
//
// This code will be called by the host when the data inside the window needs to be processed.
//
else
{
float *VoxelSubjectTemp;
int *SNPSubjectTemp;
error = hipMalloc(&VoxelSubjectTemp, VoxelListCount*NumberOfSubjects * sizeof(float));
if (error != hipSuccess)
{
fprintf(stderr, "hipMalloc failed on VoxelSubjectTemp (error code %s)!\n", hipGetErrorString(error));
return error;
}
error = hipMalloc(&SNPSubjectTemp, SNPListCount*NumberOfSubjects * sizeof(int));
if (error != hipSuccess)
{
fprintf(stderr, "hipMalloc failed on SNPSubjectTemp (error code %s)!\n", hipGetErrorString(error));
hipFree(VoxelSubjectTemp);
return error;
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, VoxelListCount, 1);
CopySubsetKernel<float><<<grid, block>>>(VoxelSubject, VoxelList, VoxelSubjectTemp, NumberOfVoxels, NumberOfSubjects, VoxelListCount);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on VoxelSubject (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, SNPListCount, 1);
CopySubsetKernel<int><<<grid, block>>>(SNPSubject, SNPList, SNPSubjectTemp, NumberOfSNPs, NumberOfSubjects, SNPListCount);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on SNPSubject (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(BLK_SIZEW, BLK_SIZEH, 1);
dim3 grid = dim3((SNPListCount + BLK_SIZEW - 1) / BLK_SIZEW, (VoxelListCount + BLK_SIZEH - 1) / BLK_SIZEH, 1);
AnovaKernel<BLK_SIZEW, BLK_SIZEH, BLK_MASKW><<<grid, block>>>(VoxelSubjectTemp,
SNPSubjectTemp,
VoxelSNP,
SNPListCount,
NumberOfSubjects,
VoxelListCount
);
}
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "Failed to launch (error code %s)!\n", hipGetErrorString(error));
}
hipFree(VoxelSubjectTemp);
hipFree(SNPSubjectTemp);
}
return error;
}
// ==================================================================
// DoKernelVegasTest
// ==================================================================
#define NumThreadsPerBlock 256
__global__ void GenerateRandomSequenceKernel(float *Dest, int NumberOfValues, hiprandState_t* States)
{
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ hiprandState_t RT[NumThreadsPerBlock];
RT[threadIdx.x] = States[gtid];
for (int i = gtid; i < NumberOfValues; i += blockDim.x*gridDim.x)
{
float T = hiprand_normal(&RT[threadIdx.x]);
Dest[i] = T;
}
States[gtid] = RT[threadIdx.x];
}
__global__ void InitRandomKernel(unsigned int seed, hiprandState_t* States)
{
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t S;
hiprand_init(gtid << 8, 0, 0, &S);
States[gtid] = S;
}
__global__ void GenerateRandomVariatesKernel(float *LDMatrix, int LDMatrixSize, float *N01VariatesBuffer, int N01Offset, int Length, float *ResultBuffer)
{
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gtid; i < LDMatrixSize*Length; i += blockDim.x*gridDim.x)
{
float T = N01VariatesBuffer[N01Offset*LDMatrixSize*Length + i];
ResultBuffer[i] = T;
}
}
class KGene
{
public:
KGene() :
m_SnpPos(0),
m_SnpLen(0),
m_pLDMatrix(0),
m_LDMatrixSize(0)
{};
~KGene()
{
}
void AddSnpPos(int pos, int len);
void AddLDMatrix(uint64_t N, uint64_t MatrixPtr);
int GetSnpPos(void) { return m_SnpPos; }
int GetSnpLen(void) { return m_SnpLen; }
float *GetLDMatrixPtr(void) { return m_pLDMatrix; }
int GetLDMatrixSize(void) { return m_LDMatrixSize; }
private:
int m_SnpPos;
int m_SnpLen;
float* m_pLDMatrix;
int m_LDMatrixSize;
};
void KGene::AddSnpPos(int pos, int len)
{
m_SnpPos = pos;
m_SnpLen = len;
}
void KGene::AddLDMatrix(uint64_t N, uint64_t MatrixPtr)
{
m_pLDMatrix = (float *)MatrixPtr;
m_LDMatrixSize = (int)N;
// Just a sanity check to ensure the values
// are equal. Unfortunately this assumes that
// AddSnpPos() has been called before AddLDMatrix().
//
assert(m_LDMatrixSize == m_SnpLen);
}
#if 0
// =========================================================================================================================
void ComputeObservedPvalues(CGene* G, int GeneNumber, int VoxelNumber, CArray2D<float> &VoxelSNP, CArray2D<float> &VoxelGeneObs)
{
double sum = 0;
for (int i = G->GetSnpPos(); i < G->GetSnpPos() + G->GetSnpLen(); i++)
{
const float PValue = VoxelSNP(VoxelNumber, i);
assert((1.0f - PValue) >= 0 && (1.0f - PValue) < 1);
// I hate to cast the chi2inv as a float, but we are trying to make this app
// as fast as possible.
const float ChiSquare = (float)chi2inv(1.0f - PValue, 1);
sum += ChiSquare;
}
VoxelGeneObs(VoxelNumber, GeneNumber) = (float)sum;
cout << "\tVoxel " << setw(3) << VoxelNumber << " Gene " << setw(3) << GeneNumber;
cout << " Snp-Pos " << setw(4) << G->GetSnpPos() << " Snp-Len " << setw(4) << G->GetSnpLen() << " Test Statistic " << sum << endl;
}
#endif
struct InvChiSq_functor : public thrust::unary_function<float, float>
{
__host__ __device__
float operator()(float x) const
{
// return (float)chi2inv(1.0f - PValue, 1);
return 1.0f / (x*x + 1.0f);
}
};
struct sumsq_functor
{
int R;
int C;
float *arr;
sumsq_functor(int _R, int _C, float *_arr) : R(_R), C(_C), arr(_arr) {};
__host__ __device__
float operator()(int myC) {
float sum = 0;
for (int i = 0; i < R; i++)
{
float T = arr[i*C + myC];
sum += T * T;
}
return sum;
}
};
struct compare_functor
{
float Thresh;
compare_functor(float Threshold) : Thresh(Threshold) {};
__host__ __device__
float operator()(float X)
{
return X > Thresh ? 1.0f : 0.0f;
}
};
hipError_t DoKernelVegasTest(float *dVoxelGeneObserved,
float *dVoxelGeneSim,
float *dVoxelSNP,
int *dSNPPosLenPairs,
uint64_t *dLDMatrixList,
int NumberOfVoxels,
int NumberOfGenes,
int NumberOfSNPs,
int NumberOfIterations,
int NumberOfSNPPositionLength
)
{
// 1. Create the GeneList
//
// 2. Compute the VoxelSNPkey vector
//
// 3. Convert each SNP p-value to its chi-square value, then reduce
// them according to the VoxelSNPkey vector, and write the result
// to the VoxelGeneObserved matrix.
//
// 4. PerformSimulation
// for each G in Genes
// RandNumberCount = NumberOfVoxels* SNPS in G * Iteration
// Launch Kernel to generate random N(0,1)'s
// for each V in Voxels
// P = pointer to next set of random N(0,1)'s
// Launch MatMul Kernel( LDMatrix_G and P )
// Plus reduce SNP p-values to GeneValue, Compare to Observed, Count
// Write Count/Iterations to VoxelGeneSim
// next V
// next G
//
// 5. Free GeneList
//
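// In short: the observed statistic for a (voxel, gene) pair is
//   T_obs = sum over the gene's SNPs of chi2inv(1 - p_snp, 1)
// (see the commented-out reference in ComputeObservedPvalues / InvChiSq_functor),
// and the simulated p-value written to VoxelGeneSim is
//   #{ simulated T > T_obs } / NumberOfIterations,
// which is what the thrust pipeline below accumulates.
//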
int numSMs;
const int devId = 0;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, devId);
//
// STEP 1 -- Create the Gene List.
//
vector<KGene*> GeneList;
int *hSNPPosLenPairs = new int[NumberOfGenes * 2];
uint64_t *hLDMatrixList = new uint64_t[NumberOfGenes * 2];
hipError_t err = hipMemcpy(hSNPPosLenPairs, dSNPPosLenPairs, NumberOfGenes * 2 * sizeof(int), hipMemcpyDeviceToHost);
assert(err == hipSuccess);
err = hipMemcpy(hLDMatrixList, dLDMatrixList, NumberOfGenes * 2 * sizeof(uint64_t), hipMemcpyDeviceToHost);
assert(err == hipSuccess);
int MaxSnpLen = 0;
for (int i = 0; i < NumberOfGenes; i++)
{
KGene* G = new KGene;
G->AddSnpPos(hSNPPosLenPairs[2 * i], hSNPPosLenPairs[2 * i + 1]);
G->AddLDMatrix(hLDMatrixList[2 * i], hLDMatrixList[2 * i + 1]);
if (G->GetSnpLen() > MaxSnpLen)
MaxSnpLen = G->GetSnpLen();
GeneList.push_back(G);
}
delete[] hSNPPosLenPairs;
delete[] hLDMatrixList;
//
// STEP 2
//
// First build a "mask" that will be used on the GPU to perform a
// "reduce by key" with "transformation". The Thrust library
// calls this mask a "key".
//
unsigned char *dVoxelSNPKey;
err = hipMalloc((void**)&dVoxelSNPKey, NumberOfVoxels*NumberOfSNPs);
assert(err == hipSuccess);
{
unsigned char *hVoxelSNPKey = new unsigned char[NumberOfVoxels*NumberOfSNPs];
int SnpSum = 0;
for (int i = 0; i < GeneList.size(); i++)
{
memset(hVoxelSNPKey + SnpSum, i, GeneList[i]->GetSnpLen());
SnpSum += GeneList[i]->GetSnpLen();
}
for (int i = 1; i < NumberOfVoxels; i++)
memcpy(hVoxelSNPKey + SnpSum*i, hVoxelSNPKey, SnpSum);
err = hipMemcpy(dVoxelSNPKey, hVoxelSNPKey, SnpSum*NumberOfVoxels, hipMemcpyHostToDevice);
delete[] hVoxelSNPKey;
}
//
// STEP 3
//
// Convert each SNP p-value to its chi-square value, then reduce
// them according to the VoxelSNPkey vector, and write the result
// to the VoxelGeneObserved matrix.
thrust::device_vector<unsigned char> d_OutputKeys(NumberOfVoxels*NumberOfGenes);
thrust::pair<thrust::device_vector<unsigned char>::iterator, float * > new_end;
new_end = thrust::reduce_by_key(thrust::device,
dVoxelSNPKey,
dVoxelSNPKey+ NumberOfVoxels*NumberOfSNPs,
thrust::make_transform_iterator(dVoxelSNP, InvChiSq_functor()),
d_OutputKeys.begin(),
dVoxelGeneObserved);
assert(new_end.first - d_OutputKeys.begin() == NumberOfVoxels*NumberOfGenes);
//
// This is kind of gross. Unfortunately I have to copy the dVoxelGeneObserved matrix
// back to the host because I have to pass each value into the loops below to act
// as a threshold against the computed Gene p-value.
//
float *hVoxelGeneObserved = new float[NumberOfVoxels*NumberOfGenes];
err = hipMemcpy(hVoxelGeneObserved, dVoxelGeneObserved, NumberOfVoxels*NumberOfGenes * sizeof(float), hipMemcpyDeviceToHost);
assert(err == hipSuccess);
//
// STEP 4
//
const int NumBlock = 512;
const int NumThreads = NumThreadsPerBlock;
hiprandState_t* States;
err = hipMalloc((void**)&States, NumBlock * NumThreads * sizeof(hiprandState_t));
assert(err == hipSuccess);
InitRandomKernel<<<NumBlock, NumThreads>>>(time(0), States);
float *dRandomNumberBuffer;
err = hipMalloc(&dRandomNumberBuffer, MaxSnpLen * NumberOfVoxels * NumberOfIterations * sizeof(float));
assert(err == hipSuccess);
float *dIterationResultBuffer;
err = hipMalloc(&dIterationResultBuffer, MaxSnpLen * NumberOfIterations * sizeof(float));
assert(err == hipSuccess);
thrust::device_vector<int> keys(NumberOfIterations, 0);
//
// Now loop over every gene ...
// and loop over every voxel ...
// and compute the VoxelGeneSim value.
//
for (int g = 0; g < GeneList.size(); g++)
{
const int RandomNumberCount = GeneList[g]->GetSnpLen() * NumberOfVoxels * NumberOfIterations;
GenerateRandomSequenceKernel<<<numSMs * 32, NumThreadsPerBlock>>>(dRandomNumberBuffer, RandomNumberCount, States);
for (int v = 0; v < NumberOfVoxels; v++)
{
const int StartingIndex = (v * GeneList[g]->GetSnpLen() * NumberOfIterations) & (~0x01F);
GenerateRandomVariatesKernel<<<NumBlock, NumThreads>>>(GeneList[g]->GetLDMatrixPtr(), GeneList[g]->GetLDMatrixSize(), dRandomNumberBuffer, v, NumberOfIterations, dIterationResultBuffer);
//
// Square every element in dIterationResultBuffer, Plus-reduce every column, threshold against dVoxelGeneObserved(g,v), Count results.
// Write Count / Iterations to dVoxelGeneSim(G,Vox)
thrust::device_vector<float> col_sums(NumberOfIterations);
thrust::sequence(col_sums.begin(), col_sums.end());
thrust::transform(col_sums.begin(), col_sums.end(), col_sums.begin(), sumsq_functor(GeneList[g]->GetLDMatrixSize(), NumberOfIterations, thrust::raw_pointer_cast(dIterationResultBuffer)));
thrust::transform(col_sums.begin(), col_sums.end(), col_sums.begin(), compare_functor(hVoxelGeneObserved[v*NumberOfGenes + g]));
thrust::reduce_by_key(thrust::device,
keys.begin(),
keys.end(),
col_sums.begin(),
thrust::make_discard_iterator(),
dVoxelGeneSim + v*NumberOfGenes + g );
}
}
thrust::transform(thrust::device,
dVoxelGeneSim,
dVoxelGeneSim+NumberOfVoxels*NumberOfGenes,
thrust::make_constant_iterator<float>(NumberOfIterations),
dVoxelGeneSim,
thrust::divides<float>());
//
// STEP 5
//
delete[] hVoxelGeneObserved;
hipFree(dIterationResultBuffer);
hipFree(dRandomNumberBuffer);
hipFree(States);
hipFree(dVoxelSNPKey);
while (!GeneList.empty())
{
KGene *G = GeneList.back();
GeneList.pop_back();
delete G;
}
return hipSuccess;
}
| b3d430ca9fc19fe54201b516e7d82054c128f93d.cu | //
// ServerKernels.cu
//
//
#include "Profile.h"
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <curand.h>
#include <iostream>
#include <assert.h>
#include <vector>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
// ==================================================================
// CudaEventTimer
//
// Example usage:
//
// T.Begin();
// generate <<< blocks, threads >>> (gData, DataPerBlock);
// T.End();
// cudaError_t err = cudaGetLastError();
// printf("\nError = %s", cudaGetErrorString(err));
// printf("\nDuration of generate kernel = %.3f ms for %d floats\n\n", T.GetTime(), N);
// ==================================================================
class CudaEventTimer
{
public:
CudaEventTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~CudaEventTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); }
void Begin() { cudaEventRecord(start); }
void End() {
cudaEventRecord(stop);
cudaEventSynchronize(stop);
}
float GetTime(void) { cudaEventElapsedTime(&ms, start, stop); return ms; }
private:
cudaEvent_t start, stop;
float ms;
};
// ==================================================================
// ==================================================================
using namespace std;
/* BIG = 1/MACHEPF */
#define BIG 16777216.0f
#define MACHEPF 5.9604644775390625E-8f
/* MAXNUMF = 2^128 * (1 - 2^-24) */
#define MAXNUMF 3.4028234663852885981170418348451692544e38f
/* log(2^-149) */
#define MINLOGF -103.278929903431851103f
#define PIF 3.141592653589793238f
#define PIINV 0.318309886183790671538f
/* log( sqrt( 2*pi ) ) */
#define LS2PI 0.91893853320467274178f
#define MAXLGM 2.035093e36
#define MAXLOGF 88.72283905206835f
// sqrt(2pi)
#define s2pi 2.50662827463100050242f
/* log gamma(x+2), -.5 < x < .5 */
__host__ __device__ float polevlfB(float xx)
{
float t = 6.055172732649237E-004f;
t = t * xx - 1.311620815545743E-003f;
t = t * xx + 2.863437556468661E-003f;
t = t * xx - 7.366775108654962E-003f;
t = t * xx + 2.058355474821512E-002f;
t = t * xx - 6.735323259371034E-002f;
t = t * xx + 3.224669577325661E-001f;
t = t * xx + 4.227843421859038E-001f;
return t;
}
/* log gamma(x+1), -.25 < x < .25 */
__host__ __device__ float polevlfC(float xx)
{
float t = 1.369488127325832E-001f;
t = t * xx - 1.590086327657347E-001f;
t = t * xx + 1.692415923504637E-001f;
t = t * xx - 2.067882815621965E-001f;
t = t * xx + 2.705806208275915E-001f;
t = t * xx - 4.006931650563372E-001f;
t = t * xx + 8.224670749082976E-001f;
t = t * xx - 5.772156501719101E-001f;
return t;
}
/* approximation for 0 <= |y - 0.5| <= 3/8 */
__host__ __device__ float polevlfP0(float xx)
{
float t = -5.99633501014107895267E1f;
t = t * xx + 9.80010754185999661536E1f;
t = t * xx - 5.66762857469070293439E1f;
t = t * xx + 1.39312609387279679503E1f;
t = t * xx - 1.23916583867381258016E0f;
return t;
}
__host__ __device__ float p1evlfQ0(float xx)
{
float t = xx + 1.95448858338141759834E0f;
t = t * xx + 4.67627912898881538453E0f;
t = t * xx + 8.63602421390890590575E1f;
t = t * xx - 2.25462687854119370527E2f;
t = t * xx + 2.00260212380060660359E2f;
t = t * xx - 8.20372256168333339912E1f;
t = t * xx + 1.59056225126211695515E1f;
t = t * xx - 1.18331621121330003142E0f;
return t;
}
/* Approximation for interval z = sqrt(-2 log y ) between 2 and 8
* i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14.
*/
__host__ __device__ float polevlfP1(float xx)
{
float t = 4.05544892305962419923E0f;
t = t * xx + 3.15251094599893866154E1f;
t = t * xx + 5.71628192246421288162E1f;
t = t * xx + 4.40805073893200834700E1f;
t = t * xx + 1.46849561928858024014E1f;
t = t * xx + 2.18663306850790267539E0f;
t = t * xx - 1.40256079171354495875E-1f;
t = t * xx - 3.50424626827848203418E-2f;
t = t * xx - 8.57456785154685413611E-4f;
return t;
}
__host__ __device__ float p1evlfQ1(float xx)
{
float t = xx + 1.57799883256466749731E1f;
t = t * xx + 4.53907635128879210584E1f;
t = t * xx + 4.13172038254672030440E1f;
t = t * xx + 1.50425385692907503408E1f;
t = t * xx + 2.50464946208309415979E0f;
t = t * xx - 1.42182922854787788574E-1f;
t = t * xx - 3.80806407691578277194E-2f;
t = t * xx - 9.33259480895457427372E-4f;
return t;
}
/* Approximation for interval z = sqrt(-2 log y ) between 8 and 64
* i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
*/
__host__ __device__ float polevlfP2(float xx)
{
float t = 3.23774891776946035970E0f;
t = t * xx + 6.91522889068984211695E0f;
t = t * xx + 3.93881025292474443415E0f;
t = t * xx + 1.33303460815807542389E0f;
t = t * xx + 2.01485389549179081538E-1f;
t = t * xx + 1.23716634817820021358E-2f;
t = t * xx + 3.01581553508235416007E-4f;
t = t * xx + 2.65806974686737550832E-6f;
t = t * xx + 6.23974539184983293730E-9f;
return t;
}
__host__ __device__ float p1evlfQ2(float xx)
{
float t = xx + 6.02427039364742014255E0f;
t = t * xx + 3.67983563856160859403E0f;
t = t * xx + 1.37702099489081330271E0f;
t = t * xx + 2.16236993594496635890E-1f;
t = t * xx + 1.34204006088543189037E-2f;
t = t * xx + 3.28014464682127739104E-4f;
t = t * xx + 2.89247864745380683936E-6f;
t = t * xx + 6.79019408009981274425E-9f;
return t;
}
__host__ __device__ float lgamf(float xx)
{
float p, q, w, z, x;
float nx, tx;
int i, direction;
int sgngamf = 1;
x = xx;
if (x < 0.0f)
{
q = -x;
w = lgamf(q); /* note this modifies sgngam! */
p = floorf(q);
if (p == q)
goto loverf;
i = (int)p;
if ((i & 1) == 0)
sgngamf = -1;
else
sgngamf = 1;
z = q - p;
if (z > 0.5f)
{
p += 1.0f;
z = p - q;
}
z = q * sinf(PIF * z);
if (z == 0.0)
goto loverf;
z = -logf(PIINV*z) - w;
return(z);
}
if (x < 6.5f)
{
direction = 0;
z = 1.0;
tx = x;
nx = 0.0;
if (x >= 1.5)
{
while (tx > 2.5f)
{
nx -= 1.0f;
tx = x + nx;
z *= tx;
}
x += nx - 2.0f;
iv1r5:
p = x * polevlfB(x);
goto cont;
}
if (x >= 1.25f)
{
z *= x;
x -= 1.0f; /* x + 1 - 2 */
direction = 1;
goto iv1r5;
}
if (x >= 0.75f)
{
x -= 1.0f;
p = x * polevlfC(x);
q = 0.0f;
goto contz;
}
while (tx < 1.5f)
{
if (tx == 0.0f)
goto loverf;
z *= tx;
nx += 1.0f;
tx = x + nx;
}
direction = 1;
x += nx - 2.0f;
p = x * polevlfB(x);
cont:
if (z < 0.0f)
{
sgngamf = -1;
z = -z;
}
else
{
sgngamf = 1;
}
q = logf(z);
if (direction)
q = -q;
contz:
return(p + q);
}
if (x > MAXLGM)
{
loverf:
return(sgngamf * MAXNUMF); // overflow
}
// Note, though an asymptotic formula could be used for x >= 3,
// there is cancellation error in the following if x < 6.5.
q = LS2PI - x;
q += (x - 0.5f) * logf(x);
if (x <= 1.0e4)
{
z = 1.0f / x;
p = z * z;
q += ((6.789774945028216E-004f * p
- 2.769887652139868E-003f) * p
+ 8.333316229807355E-002f) * z;
}
return(q);
}
//
// Continued fraction expansion #1 for incomplete beta integral.
//
__host__ __device__ float incbcff(float aa, float bb, float xx)
{
float a, b, x, xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
float k1, k2, k3, k4, k5, k6, k7, k8;
float r, t, ans;
int n;
a = aa;
b = bb;
x = xx;
k1 = a;
k2 = a + b;
k3 = a;
k4 = a + 1.0f;
k5 = 1.0f;
k6 = b - 1.0f;
k7 = k4;
k8 = a + 2.0f;
pkm2 = 0.0f;
qkm2 = 1.0f;
pkm1 = 1.0f;
qkm1 = 1.0f;
ans = 1.0f;
r = 0.0f;
n = 0;
do
{
xk = -(x * k1 * k2) / (k3 * k4);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
xk = (x * k5 * k6) / (k7 * k8);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (qk != 0)
r = pk / qk;
if (r != 0)
{
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
if (t < MACHEPF) return (ans);
k1 += 1.0f;
k2 += 1.0f;
k3 += 2.0f;
k4 += 2.0f;
k5 += 1.0f;
k6 -= 1.0f;
k7 += 2.0f;
k8 += 2.0f;
if ((fabsf(qk) + fabsf(pk)) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
if ((fabsf(qk) < MACHEPF) || (fabsf(pk) < MACHEPF))
{
pkm2 *= BIG;
pkm1 *= BIG;
qkm2 *= BIG;
qkm1 *= BIG;
}
} while (++n < 100);
return(ans);
}
//
// Continued fraction expansion #2 for incomplete beta integral.
//
__host__ __device__ float incbdf(float aa, float bb, float xx)
{
float a, b, x, xk, pk, pkm1, pkm2, qk, qkm1, qkm2;
float k1, k2, k3, k4, k5, k6, k7, k8;
float r, t, ans, z;
int n;
a = aa;
b = bb;
x = xx;
k1 = a;
k2 = b - 1.0f;
k3 = a;
k4 = a + 1.0f;
k5 = 1.0f;
k6 = a + b;
k7 = a + 1.0f;
k8 = a + 2.0f;
pkm2 = 0.0f;
qkm2 = 1.0f;
pkm1 = 1.0f;
qkm1 = 1.0f;
z = x / (1.0f - x);
ans = 1.0f;
r = 0.0f;
n = 0;
do
{
xk = -(z * k1 * k2) / (k3 * k4);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
xk = (z * k5 * k6) / (k7 * k8);
pk = pkm1 + pkm2 * xk;
qk = qkm1 + qkm2 * xk;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (qk != 0)
r = pk / qk;
if (r != 0)
{
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
if (t < MACHEPF) return ans; // underflow
k1 += 1.0f;
k2 -= 1.0f;
k3 += 2.0f;
k4 += 2.0f;
k5 += 1.0f;
k6 += 1.0f;
k7 += 2.0f;
k8 += 2.0f;
if ((fabsf(qk) + fabsf(pk)) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
if ((fabsf(qk) < MACHEPF) || (fabsf(pk) < MACHEPF))
{
pkm2 *= BIG;
pkm1 *= BIG;
qkm2 *= BIG;
qkm1 *= BIG;
}
} while (++n < 100);
return(ans);
}
__host__ __device__ float incbpsf(float aa, float bb, float xx)
{
float a, b, x, t, u, y, s;
a = aa;
b = bb;
x = xx;
y = a * logf(x) + (b - 1.0f)*logf(1.0f - x) - logf(a);
y -= lgamf(a) + lgamf(b);
y += lgamf(a + b);
t = x / (1.0f - x);
s = 0.0f;
u = 1.0f;
do
{
b -= 1.0f;
if (b == 0.0f)
break;
a += 1.0f;
u *= t*b / a;
s += u;
} while (fabsf(u) > MACHEPF);
if (y < MINLOGF)
{
s = 0.0f; // underflow
}
else
s = expf(y) * (1.0f + s);
return(s);
}
__host__ __device__ float incbetf(float aa, float bb, float xx)
{
float ans, a, b, t, x, onemx;
int flag;
if ((xx <= 0.0f) || (xx >= 1.0f))
{
if (xx == 0.0f)
return(0.0f);
if (xx == 1.0f)
return(1.0f);
return(0.0f);
}
onemx = 1.0f - xx;
// Transformation for small aa.
if (aa <= 1.0f)
{
ans = incbetf(aa + 1.0f, bb, xx);
t = aa*logf(xx) + bb*logf(1.0f - xx)
+ lgamf(aa + bb) - lgamf(aa + 1.0f) - lgamf(bb);
if (t > MINLOGF)
ans += expf(t);
return(ans);
}
// see if x is greater than the mean.
if (xx > (aa / (aa + bb)))
{
flag = 1;
a = bb;
b = aa;
t = xx;
x = onemx;
}
else
{
flag = 0;
a = aa;
b = bb;
t = onemx;
x = xx;
}
// Choose expansion for optimal convergence.
if (b > 10.0f)
{
if (fabsf(b*x / a) < 0.3f)
{
t = incbpsf(a, b, x);
goto bdone;
}
}
ans = x * (a + b - 2.0f) / (a - 1.0f);
if (ans < 1.0f)
{
ans = incbcff(a, b, x);
t = b * logf(t);
}
else
{
ans = incbdf(a, b, x);
t = (b - 1.0f) * logf(t);
}
t += a*logf(x) + lgamf(a + b) - lgamf(a) - lgamf(b);
t += logf(ans / a);
if (t < MINLOGF)
{
t = 0.0f; // underflow
}
else
{
t = expf(t);
}
bdone:
if (flag)
t = 1.0f - t;
return(t);
}
__host__ __device__ float fdtrcf(float a, float b, float x)
{
float w;
if ((a < 1.0f) || (b < 1.0f) || (x < 0.0f))
{
return(0.0f);
}
w = b / (b + a * x);
return incbetf(0.5f*b, 0.5f*a, w);
}
__host__ __device__ float ndtrif(float yy0)
{
float y0, x, y, z, y2, x0, x1;
int code;
y0 = yy0;
if (y0 <= 0.0f)
{
// mtherr("ndtrif", DOMAIN);
return(-MAXNUMF);
}
if (y0 >= 1.0f)
{
// mtherr("ndtrif", DOMAIN);
return(MAXNUMF);
}
code = 1;
y = y0;
if (y > (1.0 - 0.13533528323661269189f)) /* 0.135... = exp(-2) */
{
y = 1.0f - y;
code = 0;
}
if (y > 0.13533528323661269189f)
{
y = y - 0.5f;
y2 = y * y;
// x = y + y * (y2 * polevlf(y2, P0, 4) / p1evlf(y2, Q0, 8));
x = y + y * (y2 * polevlfP0(y2) / p1evlfQ0(y2));
x = x * s2pi;
return(x);
}
x = sqrtf(-2.0f * logf(y));
x0 = x - logf(x) / x;
z = 1.0f / x;
if (x < 8.0f) /* y > exp(-32) = 1.2664165549e-14 */
// x1 = z * polevlf(z, P1, 8) / p1evlf(z, Q1, 8);
x1 = z * polevlfP1(z) / p1evlfQ1(z);
else
// x1 = z * polevlf(z, P2, 8) / p1evlf(z, Q2, 8);
x1 = z * polevlfP2(z) / p1evlfQ2(z);
x = x0 - x1;
if (code != 0)
x = -x;
return(x);
}
__host__ __device__ float igamcf(float aa, float xx);
__host__ __device__ float igamf(float aa, float xx)
{
float a, x, ans, ax, c, r;
a = aa;
x = xx;
if ((x <= 0) || (a <= 0))
return(0.0f);
if ((x > 1.0f) && (x > a))
return(1.0f - igamcf(a, x));
/* Compute x**a * exp(-x) / gamma(a) */
ax = a * logf(x) - x - lgamf(a);
if (ax < -MAXLOGF)
{
// mtherr("igamf", UNDERFLOW);
return(0.0f);
}
ax = expf(ax);
/* power series */
r = a;
c = 1.0f;
ans = 1.0f;
do
{
r += 1.0f;
c *= x / r;
ans += c;
} while (c / ans > MACHEPF);
return(ans * ax / a);
}
__host__ __device__ float igamcf(float aa, float xx)
{
float a, x, ans, c, yc, ax, y, z;
float pk, pkm1, pkm2, qk, qkm1, qkm2;
float r, t;
// static float big = BIG;
a = aa;
x = xx;
if ((x <= 0) || (a <= 0))
return(1.0f);
if ((x < 1.0f) || (x < a))
return(1.0f - igamf(a, x));
ax = a * logf(x) - x - lgamf(a);
if (ax < -MAXLOGF)
{
// mtherr("igamcf", UNDERFLOW);
return(0.0f);
}
ax = expf(ax);
/* continued fraction */
y = 1.0f - a;
z = x + y + 1.0f;
c = 0.0;
pkm2 = 1.0f;
qkm2 = x;
pkm1 = x + 1.0f;
qkm1 = z * x;
ans = pkm1 / qkm1;
do
{
c += 1.0f;
y += 1.0f;
z += 2.0f;
yc = y * c;
pk = pkm1 * z - pkm2 * yc;
qk = qkm1 * z - qkm2 * yc;
if (qk != 0)
{
r = pk / qk;
t = fabsf((ans - r) / r);
ans = r;
}
else
t = 1.0f;
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (fabsf(pk) > BIG)
{
pkm2 *= MACHEPF;
pkm1 *= MACHEPF;
qkm2 *= MACHEPF;
qkm1 *= MACHEPF;
}
} while (t > MACHEPF);
return(ans * ax);
}
__host__ __device__ float igamif(float aa, float yy0)
{
float a, y0, d, y, x0, lgm;
int i;
if (yy0 > 0.5f)
{
//mtherr("igamif", PLOSS);
return 0.0f;
}
a = aa;
y0 = yy0;
// approximation to inverse function
d = 1.0f / (9.0f*a);
y = (1.0f - d - ndtrif(y0) * sqrtf(d));
x0 = a * y * y * y;
lgm = lgamf(a);
for (i = 0; i<10; i++)
{
if (x0 <= 0.0f)
{
// mtherr("igamif", UNDERFLOW);
return(0.0f);
}
y = igamcf(a, x0);
/* compute the derivative of the function at this point */
d = (a - 1.0f) * logf(x0) - x0 - lgm;
if (d < -MAXLOGF)
{
// mtherr("igamif", UNDERFLOW);
goto done;
}
d = -expf(d);
/* compute the step to the next approximation of x */
if (d == 0.0)
goto done;
d = (y - y0) / d;
x0 = x0 - d;
if (i < 3)
continue;
if (fabsf(d / x0) < (2.0f * MACHEPF))
goto done;
}
done:
return(x0);
}
__host__ __device__ float chdtrcf(float dff, float xx)
{
float df, x;
df = dff;
x = xx;
if ((x < 0.0f) || (df < 1.0f))
{
// mtherr("chdtrcf", DOMAIN);
return(0.0f);
}
return(igamcf(0.5f*df, 0.5f*x));
}
__host__ __device__ float chdtrf(float dff, float xx)
{
float df, x;
df = dff;
x = xx;
if ((x < 0.0f) || (df < 1.0f))
{
// mtherr("chdtrf", DOMAIN);
return(0.0);
}
return(igamf(0.5f*df, 0.5f*x));
}
__host__ __device__ float chdtrif(float dff, float yy)
{
float y, df, x;
y = yy;
df = dff;
if ((y < 0.0f) || (y > 1.0f) || (df < 1.0f))
{
return(0.0f);
}
x = igamif(0.5f * df, y);
return(2.0f * x);
}
//
// Return the area under the F-Distribution from x to +infinity.
// This represents the p-value.
//
__host__ __device__ float CumulativeFDistributionComplimentary(float dof1, float dof2, float x)
{
return fdtrcf(dof1, dof2, x);
}
// ==================================================================
// Anova kernel
// ==================================================================
template <int BLOCK_SIZEW, int BLOCK_SIZEH, int BLOCK_MASKW> __global__ void
AnovaKernel(
float * __restrict__ pVoxelSubject,
int * __restrict__ pSNPSubject,
float * __restrict__ pVoxelSNP,
int NumberOfSNPS,
int NumberOfSubjects,
int NumberOfVoxels)
{
// Block index
const int ddy = gridDim.y;
const int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
//
// First phase .. compute the between group variance.
//
while (by < (NumberOfVoxels+blockDim.y-1)/blockDim.y)
{
// Index of the first sub-matrix of VoxelSubject processed by the block
const int aBegin = NumberOfSubjects * BLOCK_SIZEW * by;
// Index of the last sub-matrix of VoxelSubject processed by the block
const int aEnd = aBegin + NumberOfSubjects - 1;
// Step size used to iterate through the sub-matrices of VoxelSubject
const int aStep = BLOCK_SIZEW;
// Index of the first sub-matrix of SNPSubject processed by the block
const int bBegin = NumberOfSubjects * BLOCK_SIZEH * bx;
// Step size used to iterate through the sub-matrices of SNPSubject
const int bStep = BLOCK_SIZEH;
__shared__ float s_VoxelSubject[BLOCK_SIZEH][BLOCK_SIZEW];
__shared__ int s_SNPSubject[BLOCK_SIZEH][BLOCK_SIZEW];
float n0 = 0.0f;
float n1 = 0.0f;
float n2 = 0.0f;
float sum0 = 0.0f;
float sum1 = 0.0f;
float sum2 = 0.0f;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
const int condVox = (a - aBegin + tx < NumberOfSubjects) && (by * blockDim.y + ty < NumberOfVoxels);
const int condSNP = (b - bBegin + tx < NumberOfSubjects) && (bx * blockDim.x + ty < NumberOfSNPS);
s_VoxelSubject[ty][tx] = (condVox == 1) ? pVoxelSubject[a + NumberOfSubjects * ty + tx] : 0.0f;
s_SNPSubject[ty][tx] = (condSNP == 1) ? pSNPSubject[b + NumberOfSubjects * ty + tx] : -1;
__syncthreads(); // Synchronize to make sure the matrices are loaded
#pragma unroll
for (int k = 0; k < BLOCK_SIZEW; k++)
{
//
// This access pattern guarantees no shared memory conflicts.
//
int SS = s_SNPSubject[tx][(k + tx) & BLOCK_MASKW];
float VS = s_VoxelSubject[ty][(k + tx) & BLOCK_MASKW];
const float C0 = (SS == 0); // Save the predicate result to avoid IF stmts
const float C1 = (SS == 1);
const float C2 = (SS == 2);
n0 += C0;
n1 += C1;
n2 += C2;
sum0 += C0*VS;
sum1 += C1*VS;
sum2 += C2*VS;
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of VoxelSubject and SNPSubject in the next iteration.
__syncthreads();
}
const float n = n0 + n1 + n2;
const float sum = sum0 + sum1 + sum2;
const float mean = sum / n;
const float mean0 = sum0 / fmaxf(n0, 1.0f);
const float mean1 = sum1 / fmaxf(n1, 1.0f);
const float mean2 = sum2 / fmaxf(n2, 1.0f);
const float T0 = mean0 - mean;
const float T1 = mean1 - mean;
const float T2 = mean2 - mean;
const float bg_var = (n0*T0*T0 + n1*T1*T1 + n2*T2*T2) / 2.0f;
//
// Second phase: compute the within-group variance.
//
float sumsq0 = 0.0f;
float sumsq1 = 0.0f;
float sumsq2 = 0.0f;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
const int condVox = (a - aBegin + tx < NumberOfSubjects) && (by * blockDim.y + ty < NumberOfVoxels);
const int condSNP = (b - bBegin + tx < NumberOfSubjects) && (bx * blockDim.x + ty < NumberOfSNPS);
s_VoxelSubject[ty][tx] = (condVox == 1) ? pVoxelSubject[a + NumberOfSubjects * ty + tx] : 0.0f;
s_SNPSubject[ty][tx] = (condSNP == 1) ? pSNPSubject[b + NumberOfSubjects * ty + tx] : -1;
__syncthreads(); // Synchronize to make sure the matrices are loaded
#pragma unroll
for (int k = 0; k < BLOCK_SIZEW; k++)
{
//
// This access pattern guarantees no shared memory conflicts.
//
int SS = s_SNPSubject[tx][(k + tx) & BLOCK_MASKW];
float VS = s_VoxelSubject[ty][(k + tx) & BLOCK_MASKW];
float C0 = (SS == 0); // Save the predicate result to avoid IF stmts
float C1 = (SS == 1);
float C2 = (SS == 2);
sumsq0 += C0*(VS - mean0)*(VS - mean0);
sumsq1 += C1*(VS - mean1)*(VS - mean1);
sumsq2 += C2*(VS - mean2)*(VS - mean2);
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of VoxelSubject and SNPSubject in the next iteration.
__syncthreads();
}
const int tidx = blockIdx.x * blockDim.x + tx;
const int tidy = by * blockDim.y + ty;
const float wg_var = (sumsq0 + sumsq1 + sumsq2) / (n - 3.0f);
const int c = NumberOfSNPS * BLOCK_SIZEH * by + BLOCK_SIZEW * bx;
if (tidx < NumberOfSNPS && tidy < NumberOfVoxels)
pVoxelSNP[c + NumberOfSNPS * ty + tx] = CumulativeFDistributionComplimentary(2.0f, n - 3.0f, bg_var / wg_var);
by += ddy;
}
}
// ==================================================================
// CopySubset kernel
// ==================================================================
template <typename T> __global__ void
CopySubsetKernel(
T * Src,
int * SrcList,
T * Dst,
int SrcH,
int SrcW,
int SrcListLen)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
if (y >= SrcH) return;
int srow = -1;
// Zero'th thread in each warp reads the row number
if ((threadIdx.x & 0x1F) == 0)
srow = SrcList[y];
srow = __shfl(srow, 0); // all threads in the warp read from laneid 0
// If you put this IF statement before the __shfl instruction, then the right-most warps
// with fewer than 32 active threads will hang indefinitely.
if (x >= SrcW) return;
const int sidx = srow * SrcW + x;
const int didx = y * SrcW + x;
T val = Src[sidx];
Dst[didx] = val;
}
// ==================================================================
// DumpRam
// ==================================================================
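// Debug helper: copies cnt floats from each device buffer to the host so they can
// be inspected in a debugger.  The host copies are deleted immediately, so set a
// breakpoint on the deletes below to examine hS1/hS2.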
void DumpRam(float *dS1, float *dS2, int off, int cnt)
{
float *hS1 = new float[cnt];
float *hS2 = new float[cnt];
    cudaMemcpy(hS1, dS1+off, cnt * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hS2, dS2+off, cnt * sizeof(float), cudaMemcpyDeviceToHost);
delete[] hS1;
delete[] hS2;
}
// ==================================================================
// DoKernelAnova
// ==================================================================
#define BLK_SIZEW 16
#define BLK_SIZEH 16
#define BLK_MASKW 0xF
cudaError_t DoKernelAnova(
float *VoxelSubject,
int *SNPSubject,
float *VoxelSNP,
int NumberOfSNPs,
int NumberOfSubjects,
int NumberOfVoxels,
int *VoxelList,
int VoxelListCount,
int *SNPList,
int SNPListCount)
{
//
// 1. GPUMalloc VoxelSubjectTemp of size (VoxelListCount, NumberOfSubjects)
// 2. Use VoxelList to copy the subset of VoxelSubject into VoxelSubjectTemp
//
// 3. GPUMalloc SNPSubjectTemp of size (SNPListCount, NumberOfSubjects)
// 4. Use SNPList to copy the subset of SNPSubject into SNPSubjectTemp
//
// 5. call Anova with VoxelSubjectTemp, SNPSubjectTemp, VoxelListCount, SNPListCount, NumberOfSubjects
//
// 6. GPUFree VoxelSubjectTemp
// 7. GPUFree SNPSubjectTemp
cudaError_t error;
    // Special case ... the host wants to process all of the voxels (could be as many as 8,000,000) but
    // SNPListCount=1.  This results in a VERY tall matrix that is 1 element wide.  The results
    // are used to show how this 1 SNP affects every voxel in the 3D brain view.
//
if (VoxelListCount == NumberOfVoxels && SNPListCount == 1)
{
int *SNPSubjectTemp;
error = cudaMalloc(&SNPSubjectTemp, SNPListCount*NumberOfSubjects * sizeof(int));
if (error != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed on SNPSubjectTemp (error code %s)!\n", cudaGetErrorString(error));
return error;
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, SNPListCount, 1);
CopySubsetKernel<int> << <grid, block >> > (SNPSubject, SNPList, SNPSubjectTemp, NumberOfSNPs, NumberOfSubjects, SNPListCount);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on SNPSubject (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(BLK_SIZEW, BLK_SIZEH, 1);
dim3 grid = dim3((SNPListCount + BLK_SIZEW - 1) / BLK_SIZEW, 16383, 1);
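            // grid.y is capped at 16383; the kernel itself strides over additional
            // voxel-block rows (the "by += ddy" loop above), so rows beyond the grid
            // can still be covered.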
AnovaKernel<BLK_SIZEW, BLK_SIZEH, BLK_MASKW> << <grid, block >> > (VoxelSubject,
SNPSubjectTemp,
VoxelSNP,
SNPListCount,
NumberOfSubjects,
VoxelListCount
);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to launch (VoxelListCount == NumberOfVoxels && SNPListCount == 1) (error code %s)!\n", cudaGetErrorString(error));
}
cudaFree(SNPSubjectTemp);
}
//
    // This code is called by the host when the data inside the window needs to be processed.
//
else
{
float *VoxelSubjectTemp;
int *SNPSubjectTemp;
error = cudaMalloc(&VoxelSubjectTemp, VoxelListCount*NumberOfSubjects * sizeof(float));
if (error != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed on VoxelSubjectTemp (error code %s)!\n", cudaGetErrorString(error));
return error;
}
error = cudaMalloc(&SNPSubjectTemp, SNPListCount*NumberOfSubjects * sizeof(int));
if (error != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed on SNPSubjectTemp (error code %s)!\n", cudaGetErrorString(error));
cudaFree(VoxelSubjectTemp);
return error;
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, VoxelListCount, 1);
CopySubsetKernel<float> << <grid, block >> > (VoxelSubject, VoxelList, VoxelSubjectTemp, NumberOfVoxels, NumberOfSubjects, VoxelListCount);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on VoxelSubject (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(512, 1, 1);
dim3 grid = dim3((NumberOfSubjects + block.x - 1) / block.x, SNPListCount, 1);
CopySubsetKernel<int> << <grid, block >> > (SNPSubject, SNPList, SNPSubjectTemp, NumberOfSNPs, NumberOfSubjects, SNPListCount);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "CopySubsetKernel failed on SNPSubject (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
}
{
dim3 block = dim3(BLK_SIZEW, BLK_SIZEH, 1);
dim3 grid = dim3((SNPListCount + BLK_SIZEW - 1) / BLK_SIZEW, (VoxelListCount + BLK_SIZEH - 1) / BLK_SIZEH, 1);
AnovaKernel<BLK_SIZEW, BLK_SIZEH, BLK_MASKW> << <grid, block >> > (VoxelSubjectTemp,
SNPSubjectTemp,
VoxelSNP,
SNPListCount,
NumberOfSubjects,
VoxelListCount
);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to launch (error code %s)!\n", cudaGetErrorString(error));
}
cudaFree(VoxelSubjectTemp);
cudaFree(SNPSubjectTemp);
}
return error;
}
// ==================================================================
// DoKernelVegasTest
// ==================================================================
#define NumThreadsPerBlock 256
__global__ void GenerateRandomSequenceKernel(float *Dest, int NumberOfValues, curandState_t* States)
{
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ curandState_t RT[NumThreadsPerBlock];
RT[threadIdx.x] = States[gtid];
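    // Grid-stride loop: each thread produces every (blockDim.x * gridDim.x)-th value,
    // so the kernel fills Dest regardless of how many blocks were launched.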
for (int i = gtid; i < NumberOfValues; i += blockDim.x*gridDim.x)
{
float T = curand_normal(&RT[threadIdx.x]);
Dest[i] = T;
}
States[gtid] = RT[threadIdx.x];
}
__global__ void InitRandomKernel(unsigned int seed, curandState_t* States)
{
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
curandState_t S;
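    // Note: the 'seed' argument is not used here; each thread seeds its generator
    // from its own global thread id (gtid << 8) instead.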
curand_init(gtid << 8, 0, 0, &S);
States[gtid] = S;
}
__global__ void GenerateRandomVariatesKernel(float *LDMatrix, int LDMatrixSize, float *N01VariatesBuffer, int N01Offset, int Length, float *ResultBuffer)
{
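    // As written, this kernel just copies one voxel's block of pre-generated N(0,1)
    // variates into ResultBuffer; the LD matrix is not applied here (LDMatrixSize is
    // only used to size the copy).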
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = gtid; i < LDMatrixSize*Length; i += blockDim.x*gridDim.x)
{
float T = N01VariatesBuffer[N01Offset*LDMatrixSize*Length + i];
ResultBuffer[i] = T;
}
}
class KGene
{
public:
KGene() :
m_SnpPos(0),
m_SnpLen(0),
m_pLDMatrix(0),
m_LDMatrixSize(0)
{};
~KGene()
{
}
void AddSnpPos(int pos, int len);
void AddLDMatrix(uint64_t N, uint64_t MatrixPtr);
int GetSnpPos(void) { return m_SnpPos; }
int GetSnpLen(void) { return m_SnpLen; }
float *GetLDMatrixPtr(void) { return m_pLDMatrix; }
int GetLDMatrixSize(void) { return m_LDMatrixSize; }
private:
int m_SnpPos;
int m_SnpLen;
float* m_pLDMatrix;
int m_LDMatrixSize;
};
void KGene::AddSnpPos(int pos, int len)
{
m_SnpPos = pos;
m_SnpLen = len;
}
void KGene::AddLDMatrix(uint64_t N, uint64_t MatrixPtr)
{
m_pLDMatrix = (float *)MatrixPtr;
m_LDMatrixSize = (int)N;
    // Just a sanity check to ensure the values
    // are equal.  Unfortunately this assumes that
    // AddSnpPos() has already been called before AddLDMatrix().
//
assert(m_LDMatrixSize == m_SnpLen);
}
#if 0
// =========================================================================================================================
void ComputeObservedPvalues(CGene* G, int GeneNumber, int VoxelNumber, CArray2D<float> &VoxelSNP, CArray2D<float> &VoxelGeneObs)
{
double sum = 0;
for (int i = G->GetSnpPos(); i < G->GetSnpPos() + G->GetSnpLen(); i++)
{
const float PValue = VoxelSNP(VoxelNumber, i);
assert((1.0f - PValue) >= 0 && (1.0f - PValue) < 1);
// I hate to cast the chi2inv as a float, but we are trying to make this app
// as fast as possible.
const float ChiSquare = (float)chi2inv(1.0f - PValue, 1);
sum += ChiSquare;
}
VoxelGeneObs(VoxelNumber, GeneNumber) = (float)sum;
cout << "\tVoxel " << setw(3) << VoxelNumber << " Gene " << setw(3) << GeneNumber;
cout << " Snp-Pos " << setw(4) << G->GetSnpPos() << " Snp-Len " << setw(4) << G->GetSnpLen() << " Test Statistic " << sum << endl;
}
#endif
struct InvChiSq_functor : public thrust::unary_function<float, float>
{
__host__ __device__
float operator()(float x) const
{
// return (float)chi2inv(1.0f - PValue, 1);
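        // The commented line above shows the presumably intended inverse chi-square
        // transform; the value actually returned below is a simple stand-in, 1 / (x^2 + 1).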
return 1.0f / (x*x + 1.0f);
}
};
struct sumsq_functor
{
int R;
int C;
float *arr;
sumsq_functor(int _R, int _C, float *_arr) : R(_R), C(_C), arr(_arr) {};
__host__ __device__
float operator()(int myC) {
float sum = 0;
for (int i = 0; i < R; i++)
{
float T = arr[i*C + myC];
sum += T * T;
}
return sum;
}
};
struct compare_functor
{
float Thresh;
compare_functor(float Threshold) : Thresh(Threshold) {};
__host__ __device__
float operator()(float X)
{
return X > Thresh ? 1.0f : 0.0f;
}
};
cudaError_t DoKernelVegasTest(float *dVoxelGeneObserved,
float *dVoxelGeneSim,
float *dVoxelSNP,
int *dSNPPosLenPairs,
uint64_t *dLDMatrixList,
int NumberOfVoxels,
int NumberOfGenes,
int NumberOfSNPs,
int NumberOfIterations,
int NumberOfSNPPositionLength
)
{
// 1. Create the GeneList
//
// 2. Compute the VoxelSNPkey vector
//
    // 3. Convert each SNP p-value to its chi-square value, then reduce
// them according to the VoxelSNPkey vector, and write the result
// to the VoxelGeneObserved matrix.
//
// 4. PerformSimulation
// for each G in Genes
// RandNumberCount = NumberOfVoxels* SNPS in G * Iteration
// Launch Kernel to generate random N(0,1)'s
// for each V in Voxels
// P = pointer to next set of random N(0,1)'s
// Launch MatMul Kernel( LDMatrix_G and P )
// Plus reduce SNP p-values to GeneValue, Compare to Observed, Count
// Write Count/Iterations to VoxelGeneSim
// next V
// next G
//
// 5. Free GeneList
//
int numSMs;
const int devId = 0;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, devId);
//
// STEP 1 -- Create the Gene List.
//
vector<KGene*> GeneList;
int *hSNPPosLenPairs = new int[NumberOfGenes * 2];
uint64_t *hLDMatrixList = new uint64_t[NumberOfGenes * 2];
cudaError_t err = cudaMemcpy(hSNPPosLenPairs, dSNPPosLenPairs, NumberOfGenes * 2 * sizeof(int), cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
err = cudaMemcpy(hLDMatrixList, dLDMatrixList, NumberOfGenes * 2 * sizeof(uint64_t), cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
int MaxSnpLen = 0;
for (int i = 0; i < NumberOfGenes; i++)
{
KGene* G = new KGene;
G->AddSnpPos(hSNPPosLenPairs[2 * i], hSNPPosLenPairs[2 * i + 1]);
G->AddLDMatrix(hLDMatrixList[2 * i], hLDMatrixList[2 * i + 1]);
if (G->GetSnpLen() > MaxSnpLen)
MaxSnpLen = G->GetSnpLen();
GeneList.push_back(G);
}
delete[] hSNPPosLenPairs;
delete[] hLDMatrixList;
//
// STEP 2
//
// First build a "mask" that will be used on the GPU to perform a
// "reduce by key" with "transformation". The Thrust library
// calls this mask a "key".
//
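    // For example, with genes of SNP lengths 3 and 2 the key for one voxel looks
    // like [0 0 0 1 1]; it is then repeated once per voxel so that reduce_by_key
    // collapses each voxel's SNP values into one value per gene.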
unsigned char *dVoxelSNPKey;
err = cudaMalloc((void**)&dVoxelSNPKey, NumberOfVoxels*NumberOfSNPs);
assert(err == cudaSuccess);
{
unsigned char *hVoxelSNPKey = new unsigned char[NumberOfVoxels*NumberOfSNPs];
int SnpSum = 0;
for (int i = 0; i < GeneList.size(); i++)
{
memset(hVoxelSNPKey + SnpSum, i, GeneList[i]->GetSnpLen());
SnpSum += GeneList[i]->GetSnpLen();
}
for (int i = 1; i < NumberOfVoxels; i++)
memcpy(hVoxelSNPKey + SnpSum*i, hVoxelSNPKey, SnpSum);
err = cudaMemcpy(dVoxelSNPKey, hVoxelSNPKey, SnpSum*NumberOfVoxels, cudaMemcpyHostToDevice);
delete[] hVoxelSNPKey;
}
//
// STEP 3
//
    // Convert each SNP p-value to its chi-square value, then reduce
// them according to the VoxelSNPkey vector, and write the result
// to the VoxelGeneObserved matrix.
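    // thrust::reduce_by_key consumes the values through a transform_iterator, so each
    // p-value is converted by InvChiSq_functor on the fly and the converted values that
    // share the same key (i.e. belong to the same voxel/gene pair) are summed together.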
thrust::device_vector<unsigned char> d_OutputKeys(NumberOfVoxels*NumberOfGenes);
thrust::pair<thrust::device_vector<unsigned char>::iterator, float * > new_end;
new_end = thrust::reduce_by_key(thrust::device,
dVoxelSNPKey,
dVoxelSNPKey+ NumberOfVoxels*NumberOfSNPs,
thrust::make_transform_iterator(dVoxelSNP, InvChiSq_functor()),
d_OutputKeys.begin(),
dVoxelGeneObserved);
assert(new_end.first - d_OutputKeys.begin() == NumberOfVoxels*NumberOfGenes);
//
// This is kind of gross. Unfortunately I have to copy the dVoxelGeneObserved matrix
// back to the host because I have to pass each value into the loops below to act
// as a threshold against the computed Gene p-value.
//
float *hVoxelGeneObserved = new float[NumberOfVoxels*NumberOfGenes];
err = cudaMemcpy(hVoxelGeneObserved, dVoxelGeneObserved, NumberOfVoxels*NumberOfGenes * sizeof(float), cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
//
// STEP 4
//
const int NumBlock = 512;
const int NumThreads = NumThreadsPerBlock;
curandState_t* States;
err = cudaMalloc((void**)&States, NumBlock * NumThreads * sizeof(curandState_t));
assert(err == cudaSuccess);
InitRandomKernel << <NumBlock, NumThreads >> > (time(0), States);
float *dRandomNumberBuffer;
err = cudaMalloc(&dRandomNumberBuffer, MaxSnpLen * NumberOfVoxels * NumberOfIterations * sizeof(float));
assert(err == cudaSuccess);
float *dIterationResultBuffer;
err = cudaMalloc(&dIterationResultBuffer, MaxSnpLen * NumberOfIterations * sizeof(float));
assert(err == cudaSuccess);
thrust::device_vector<int> keys(NumberOfIterations, 0);
//
// Now loop over every gene ...
// and loop over every voxel ...
// and compute the VoxelGenSim value.
//
for (int g = 0; g < GeneList.size(); g++)
{
const int RandomNumberCount = GeneList[g]->GetSnpLen() * NumberOfVoxels * NumberOfIterations;
GenerateRandomSequenceKernel << <numSMs * 32, NumThreadsPerBlock >> > (dRandomNumberBuffer, RandomNumberCount, States);
for (int v = 0; v < NumberOfVoxels; v++)
{
const int StartingIndex = (v * GeneList[g]->GetSnpLen() * NumberOfIterations) & (~0x01F);
GenerateRandomVariatesKernel << <NumBlock, NumThreads >> > (GeneList[g]->GetLDMatrixPtr(), GeneList[g]->GetLDMatrixSize(), dRandomNumberBuffer, v, NumberOfIterations, dIterationResultBuffer);
//
// Square every element in dIterationResultBuffer, Plus-reduce every column, threshold against dVoxelGeneObserved(g,v), Count results.
// Write Count / Iterations to dVoxelGeneSim(G,Vox)
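            // The thrust calls below implement that: sequence + transform(sumsq_functor)
            // compute the per-column sums of squares, transform(compare_functor) turns each
            // sum into a 0/1 exceedance flag, and reduce_by_key adds the flags into
            // dVoxelGeneSim (the division by Iterations happens after the loops).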
thrust::device_vector<float> col_sums(NumberOfIterations);
thrust::sequence(col_sums.begin(), col_sums.end());
thrust::transform(col_sums.begin(), col_sums.end(), col_sums.begin(), sumsq_functor(GeneList[g]->GetLDMatrixSize(), NumberOfIterations, thrust::raw_pointer_cast(dIterationResultBuffer)));
thrust::transform(col_sums.begin(), col_sums.end(), col_sums.begin(), compare_functor(hVoxelGeneObserved[v*NumberOfGenes + g]));
thrust::reduce_by_key(thrust::device,
keys.begin(),
keys.end(),
col_sums.begin(),
thrust::make_discard_iterator(),
dVoxelGeneSim + v*NumberOfGenes + g );
}
}
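    // Convert the accumulated exceedance counts into empirical p-values by dividing
    // each entry of dVoxelGeneSim by the number of iterations.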
thrust::transform(thrust::device,
dVoxelGeneSim,
dVoxelGeneSim+NumberOfVoxels*NumberOfGenes,
thrust::make_constant_iterator<float>(NumberOfIterations),
dVoxelGeneSim,
thrust::divides<float>());
//
// STEP 5
//
delete[] hVoxelGeneObserved;
cudaFree(dIterationResultBuffer);
cudaFree(dRandomNumberBuffer);
cudaFree(States);
cudaFree(dVoxelSNPKey);
while (!GeneList.empty())
{
KGene *G = GeneList.back();
GeneList.pop_back();
delete G;
}
return cudaSuccess;
}
|
a295a7af43c0e41525003199b50f8fff15590766.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<phi::DenseTensor>("X");
auto *rank_offset = ctx.Input<phi::DenseTensor>("RankOffset");
auto *param = ctx.Input<phi::DenseTensor>("RankParam");
auto *input_help = ctx.Output<phi::DenseTensor>("InputHelp");
auto *ins_rank = ctx.Output<phi::DenseTensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<phi::DenseTensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0],
ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2,
max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim,
para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
int max_ins = ::max(ins_num, max_size);
phi::DenseTensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
input_help_data,
ins_num,
block_matrix_row,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
ins_rank_data,
max_rank);
expand_rank_attention_param(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
param->data<T>(),
para_row,
para_col,
param_help_data,
ins_num * block_matrix_row,
para_col,
max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
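    // Batched GEMM: for each of the ins_num instances, multiply the (1 x block_matrix_row)
    // row of input_help by its (block_matrix_row x para_col) slice of param_help to get a
    // (1 x para_col) row of Out.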
blas.BatchedGEMM(transA,
transB,
1,
para_col,
block_matrix_row,
alpha,
input_help_data,
param_help_data,
beta,
out_data,
ins_num,
strideA,
strideB);
}
};
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<phi::DenseTensor>("X"); // not use data
auto *rank_offset =
ctx.Input<phi::DenseTensor>("RankOffset"); // not use data
auto *param = ctx.Input<phi::DenseTensor>("RankParam"); // not use data
auto *input_help = ctx.Input<phi::DenseTensor>("InputHelp");
auto *ins_rank = ctx.Input<phi::DenseTensor>("InsRank");
auto *dout = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para =
ctx.Output<phi::DenseTensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
int max_ins = ::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
phi::DenseTensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
blas.BatchedGEMM(transA,
transB,
block_matrix_row,
para_col,
1,
alpha,
input_help_data,
dout->data<T>(),
beta,
param_grad_data,
ins_num,
strideA,
strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(ctx.cuda_device_context().stream(),
param_grad_data,
ins_num * block_matrix_row,
para_col,
drank_para->data<T>(),
para_row,
para_col,
ins_rank_data,
ins_num,
max_rank,
x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
| a295a7af43c0e41525003199b50f8fff15590766.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<phi::DenseTensor>("X");
auto *rank_offset = ctx.Input<phi::DenseTensor>("RankOffset");
auto *param = ctx.Input<phi::DenseTensor>("RankParam");
auto *input_help = ctx.Output<phi::DenseTensor>("InputHelp");
auto *ins_rank = ctx.Output<phi::DenseTensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<phi::DenseTensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0],
ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2,
max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim,
para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
int max_ins = std::max(ins_num, max_size);
phi::DenseTensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
input_help_data,
ins_num,
block_matrix_row,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
ins_rank_data,
max_rank);
expand_rank_attention_param(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
param->data<T>(),
para_row,
para_col,
param_help_data,
ins_num * block_matrix_row,
para_col,
max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
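    // Batched GEMM: for each of the ins_num instances, multiply the (1 x block_matrix_row)
    // row of input_help by its (block_matrix_row x para_col) slice of param_help to get a
    // (1 x para_col) row of Out.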
blas.BatchedGEMM(transA,
transB,
1,
para_col,
block_matrix_row,
alpha,
input_help_data,
param_help_data,
beta,
out_data,
ins_num,
strideA,
strideB);
}
};
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<phi::DenseTensor>("X"); // not use data
auto *rank_offset =
ctx.Input<phi::DenseTensor>("RankOffset"); // not use data
auto *param = ctx.Input<phi::DenseTensor>("RankParam"); // not use data
auto *input_help = ctx.Input<phi::DenseTensor>("InputHelp");
auto *ins_rank = ctx.Input<phi::DenseTensor>("InsRank");
auto *dout = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para =
ctx.Output<phi::DenseTensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
int max_ins = std::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
phi::DenseTensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
blas.BatchedGEMM(transA,
transB,
block_matrix_row,
para_col,
1,
alpha,
input_help_data,
dout->data<T>(),
beta,
param_grad_data,
ins_num,
strideA,
strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(ctx.cuda_device_context().stream(),
param_grad_data,
ins_num * block_matrix_row,
para_col,
drank_para->data<T>(),
para_row,
para_col,
ins_rank_data,
ins_num,
max_rank,
x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
|
5560bb95817e41553a4b18f9a6ab96838ecf40d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "config.h"
#include "histogram.h"
#include "d_classify.h"
#include "CHECK.h"
#include "wrappers.h"
#define CLASSBLOCKDIM 1024
static __device__ void computeHistSz(int *, int *);
//parameters for building the histogram from the image
//TILEWIDTH is number of pixels in a row that a single thread will handle
#define TILEWIDTH 8
#define HISTBLOCKDIM 32
//prototypes for functions local to this file
static float histogramOnGPU(histogramT *, unsigned char *, int, int, int);
static float classifyOnGPU(float *, int *, int modelCt);
//prototypes for the kernels
static __global__ void d_histoKernel(histogramT *, unsigned char *, int, int, int);
static __global__ void d_classifyKernel(float *, float *, int *);
static __global__ void emptyKernel();
//prototypes of functions called by d_classifyKernel
static __device__ void normalizeHist(float *, int *, int);
static __device__ void intersection(float * normHistograms, float * intersect);
//for debugging
static __device__ void printFloatArray(float * array, int startIdx, int length);
__device__ void printIntArray(int * data, int length);
/*
d_classify
Performs image classification on the GPU by first building a histogram
to represent the image and then comparing the histogram to each of the
histogram models.
Outputs:
Phisto - pointer to histogramT struct containing the bins
dresult - comparisonT array of structs; one element per model
Inputs:
models - an array of pointers to histogramT structs; one element per
model to be compared to the input
Pin - array contains the color pixels of the image to be used for
building a histogram and doing the classification
width and height - dimensions of the image
pitch - size of each row
Returns the amount of time it takes to build the histogram and
classify the image
*/
float d_classify(histogramT * Phisto, comparisonT * dresult,
histogramT ** models, int modelCt, unsigned char * Pin,
int height, int width, int pitch)
{
float gpuMsecTime1, gpuMsecTime2;
//launch an empty kernel to get more accurate timing
hipLaunchKernelGGL(( emptyKernel), dim3(1024), dim3(1024), 0, 0, );
//build a histogram of the input image
gpuMsecTime1 = histogramOnGPU(Phisto, Pin, height, width, pitch);
//allocate array to hold all histograms, including the histogram for the input
int * histograms = (int *) Malloc(sizeof(int) * (modelCt + 1) * TOTALBINS);
//copy the histogram for the input to the beginning of the array
memcpy(histograms, Phisto->histogram, sizeof(int) * TOTALBINS);
//copy the remaining histograms
for (int i = 1; i <= modelCt; i++)
memcpy(&histograms[i*TOTALBINS], models[i - 1]->histogram, sizeof(int) * TOTALBINS);
//allocate an array of floats to hold the comparisons
float * comparisons = (float *) Malloc(sizeof(int) * modelCt);
//perform the classification
gpuMsecTime2 = classifyOnGPU(comparisons, histograms, modelCt);
//copy the results into the output
for (int i = 0; i < modelCt; i++)
{
dresult[i].comparison = comparisons[i];
strncpy(dresult[i].fileName, models[i]->fileName, NAMELEN);
}
return gpuMsecTime1 + gpuMsecTime2;
}
/*
histogramOnGPU
Builds a histogram to represent the input image.
Outputs:
Phisto - pointer to the histogramT struct containing the bins
Inputs:
Pin - array contains the color pixels of the image to be used for
building a histogram
width and height - dimensions of the image
pitch - size of each row
Returns the amount of time it takes to build the histogram
*/
float histogramOnGPU(histogramT * Phisto, unsigned char * Pin, int height,
int width, int pitch)
{
//THIS CODE IS COMPLETE
hipEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
//Use cuda functions to do the timing
//create event objects
CHECK(hipEventCreate(&start_gpu));
CHECK(hipEventCreate(&stop_gpu));
unsigned char * d_Pin;
int numPinBytes = sizeof(unsigned char) * pitch * height * CHANNELS;
histogramT * d_Phisto;
//create the array on the GPU to hold input
CHECK(hipMalloc((void **)&d_Pin, numPinBytes));
CHECK(hipMemcpy(d_Pin, Pin, numPinBytes, hipMemcpyHostToDevice));
//create the array on the GPU to hold the histogram
CHECK(hipMalloc((void **)&d_Phisto, sizeof(histogramT)));
CHECK(hipMemcpy(d_Phisto, Phisto, sizeof(histogramT),
hipMemcpyHostToDevice));
//build the histogram
CHECK(hipEventRecord(start_gpu));
//each thread calculates TILEWIDTH elements in a row
dim3 grid(ceil(width/(float)(HISTBLOCKDIM * TILEWIDTH)),
ceil(height/(float)HISTBLOCKDIM), 1);
dim3 block(HISTBLOCKDIM, HISTBLOCKDIM, 1);
hipLaunchKernelGGL(( d_histoKernel), dim3(grid), dim3(block), 0, 0, d_Phisto, d_Pin, height, width, pitch);
CHECK(hipEventRecord(stop_gpu));
CHECK(hipMemcpy(Phisto, d_Phisto, sizeof(histogramT),
hipMemcpyDeviceToHost));
//record the ending time and wait for event to complete
CHECK(hipEventSynchronize(stop_gpu));
//calculate the elapsed time between the two events
CHECK(hipEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
return gpuMsecTime;
}
/*
d_histoKernel
Kernel code executed by each thread on its own data when the kernel is
launched. Each thread operates on TILEWIDTH pixels in a row.
Inputs:
Pin - array contains the color pixels to be used to build the histogram
width and height - dimensions of the image
pitch - size of each row
Output:
histo - pointer to a histogramT struct that contains an array of bins
*/
__global__
void d_histoKernel(histogramT * histo, unsigned char * Pin, int height,
int width, int pitch)
{
//THIS CODE IS COMPLETE. You can replace it with a faster version
//if you like, but the shared memory version won't work with all
//TOTALBINS sizes. If you use that one, the largest BIN value can
//only be 8.
int colStart = (blockIdx.x * blockDim.x + threadIdx.x) * TILEWIDTH;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col;
//use a privatization technique to reduce the number of atomic adds
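    //(consecutive pixels in a row often fall in the same bin, so counts are
    // accumulated in a register and flushed with one atomicAdd per bin change)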
int accumulator = 0;
int prevBin = -1;
int currBin;
//go through each pixel in the tile
for (int i = 0; i < TILEWIDTH; i++)
{
col = colStart + i;
if (row < height && col < width)
{
//flatten the 2D indices
int pIndx = row * CHANNELS * pitch + col * CHANNELS;
unsigned char redVal = Pin[pIndx];
unsigned char greenVal = Pin[pIndx + 1];
unsigned char blueVal = Pin[pIndx + 2];
currBin = (redVal/TONESPB)*BINS*BINS + (blueVal/TONESPB)*BINS
+ greenVal/TONESPB;
if (currBin != prevBin)
{
if (accumulator > 0)
atomicAdd(&(histo->histogram[prevBin]), accumulator);
prevBin = currBin;
accumulator = 1;
} else accumulator++;
}
}
if (accumulator > 0)
{
atomicAdd(&(histo->histogram[prevBin]), accumulator);
}
}
/*
classifyOnGPU
Performs image classification on the GPU
Outputs:
comparisons - an array of size modelCt. comparisons[i] is set to the
result of comparing the input image to model i
The size of this array is modelCt.
Inputs:
histograms - an array of histograms.
The histogram for the input image is in:
histograms[0] ... histogram[TOTALBINS - 1]
The histogram for model 0 is in:
histograms[TOTALBINS] ... histogram[2*TOTALBINS - 1]
The histogram for the last model is in:
histograms[modelCt*TOTALBINS] ... histogram[modelCt*TOTALBINS - 1]
Thus, note that the array contains the input histogram and the
model histograms and thus is of size (modelCt + 1) * TOTALBINS
modelCt - count of the number of models used for the classification
Returns the amount of time it takes to classify the image
*/
float classifyOnGPU(float * comparisons, int * histograms, int modelCt)
{
hipEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
    //allocate a float array on the GPU to hold the normalized histograms.
    //It needs to be big enough to hold the histogram of the input image
    //and the histograms of all of the models.
float * normHistograms;
CHECK(hipMalloc((void **)&normHistograms, sizeof(float) * TOTALBINS * (modelCt + 1)));
int * dhistograms;
//allocate an int array on the GPU to hold the original histograms
//It needs to be big enough to hold the histogram of the input image and
    //the histograms of all of the models.
CHECK(hipMalloc((void **)&dhistograms, sizeof(float) * TOTALBINS * (modelCt + 1)));
//copy input histograms into dhistograms
CHECK(hipMemcpy(dhistograms, histograms, sizeof(int) * TOTALBINS * (modelCt + 1),
hipMemcpyHostToDevice));
float * dcomparisons;
//allocate a float array on the GPU to hold the comparisons
//there needs to be one element per model
CHECK(hipMalloc((void **)&dcomparisons, sizeof(float) * modelCt ));
//Use cuda functions to do the timing
//create event objects
CHECK(hipEventCreate(&start_gpu));
CHECK(hipEventCreate(&stop_gpu));
//record the starting time
CHECK(hipEventRecord(start_gpu));
//each model is handled by a single block of threads
//an extra block of threads is needed to normalize the input histogram
dim3 grid(modelCt + 1, 1, 1);
//don't make block any larger than the number of bins
dim3 block(min(TOTALBINS, CLASSBLOCKDIM), 1);
hipLaunchKernelGGL(( d_classifyKernel), dim3(grid), dim3(block), 0, 0, dcomparisons, normHistograms, dhistograms);
CHECK(hipEventRecord(stop_gpu));
//copy the device comparison array into the host comparison array
CHECK(hipMemcpy(comparisons, dcomparisons, sizeof(float) * modelCt,
hipMemcpyDeviceToHost));
//record the ending time and wait for event to complete
CHECK(hipEventSynchronize(stop_gpu));
//calculate the elapsed time between the two events
CHECK(hipEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
return gpuMsecTime;
}
/*
d_classifyKernel
Kernel used to do the image classification on the GPU. Each block of
threads normalizes a single histogram. After that, every block except
for block 0 will perform the intersection and store a
result in the comparisons array.
Thus, each block (except for 0) produces one result for the comparisons
array. Each thread in a block handles TOTALBINS/blockDim.x elements
Inputs:
histograms - array of size gridDim.x * TOTALBINS. It contains
gridDim.x histograms each of size TOTALBINS. The first one
is the input histogram.
Outputs:
comparisons - comparison[i] is set to the value of the comparison of the
input histogram and the histogram of model i; for example,
comparison[0] is set to comparison of the input and model 0.
normHistograms - array of size gridDim.x * TOTALBINS. It contains
gridDim.x histograms that are equal to the normalization
of the input histograms.
*/
__device__ int blockSync = 0; //need this to provide synchronization among blocks
__global__ void d_classifyKernel(float * comparisons, float * normHistograms, int * histograms)
{
__shared__ int histSz;
__shared__ float intersect;
//thread 0 in the block should initialize histSz and intersect to 0
if (threadIdx.x == 0)
{
intersect = 0;
histSz = 0;
}
__syncthreads();
computeHistSz(histograms, &histSz);
__syncthreads();
/*
if (threadIdx.x == 0)
{
printf("%d\n", histSz);
}
*/
//normalize the histogram
normalizeHist(normHistograms, histograms, histSz);
__syncthreads();
//after block 0 has finished computing the normalized histogram,
//one thread in its block should set blockSync to 1 so other blocks can
//then proceed to compute the intersection
if (blockIdx.x == 0 && threadIdx.x == 0)
{
atomicAdd(&blockSync, 1);
__threadfence();
}
else if (blockIdx.x > 0)
{
//if not a block 0 thread, wait until blockSync is no longer 0 before
//continuing (page 193 has logic similar to what has to be done here)
while (atomicAdd(&blockSync, 0) == 0);
//compute the intersection
intersection(normHistograms, &intersect);
__syncthreads();
//one thread in all blocks except 0 should store the fractional intersect
//value in the comparisons array
if (threadIdx.x == 0)
{
//printf("Intersect: %f, NORMMAX: %d, Storing %f in comparisons[%d]\n",
// intersect, NORMMAX, intersect/NORMMAX, blockIdx.x - 1);
comparisons[blockIdx.x - 1] = intersect/NORMMAX;
}
}
}
/*
intersection
Calculates the intersection of the input histogram and a model histogram
after they have been normalized.
The input histogram is in normHistograms[0] ... normHistograms[TOTALBINS - 1]
The model histogram is in normHistograms[TOTALBINS * blockIdx.x] ...
normHistograms[TOTALBINS * blockIdx.x - 1]
Inputs:
normHistograms - array of TOTALBINS * gridDim.x bins (gridDim.x histograms)
intersect - pointer to the shared intersect value
Outputs:
shared intersect variable is incremented by the intersection calculated by the
thread running this code
*/
__device__ void intersection(float * normHistograms, float * intersect)
{
//compute intersection using cyclic partitioning
float * normHistogramTile = &normHistograms[blockIdx.x * TOTALBINS];
int tdx = threadIdx.x;
while (tdx < TOTALBINS)
{
float minTwo = fmin(normHistogramTile[tdx], normHistograms[tdx]);
atomicAdd(intersect, minTwo);
tdx += blockDim.x;
}
}
__device__ void printIntArray(int * data, int length)
{
int i, j = 0;
for (i = 0; i < length; i++, j++)
{
if ((j % 10) == 0) printf("\n%3d: ", i);
printf("%5d ", data[i]);
}
}
/*
computeHistSz
Calculates the size of a histogram by adding up all of the bin
values. The histogram to be used for the calculation is in elements
histograms[blockIdx.x * TOTALBINS] ... histograms[(blockIdx.x + 1) * TOTALBINS]
Inputs:
histograms - array of TOTALBINS * gridDim.x bins (gridDim.x histograms)
histSz - pointer to the shared histogram size variable
Outputs:
shared histogram size variable is incremented by the size calculated by
the thread running this code
*/
__device__ void computeHistSz(int * histograms, int * histSz)
{
//compute histogram size (sum of bins) using cyclic partitioning
int tdx = threadIdx.x;
int * histogramTile = &histograms[blockIdx.x * TOTALBINS];
while (tdx < TOTALBINS)
{
atomicAdd(histSz, histogramTile[tdx]);
tdx += blockDim.x;
}
}
/*
normalizeHist
Normalizes the histogram so that every bin value is between 0 and NORMMAX.
The histogram to be normalized is in elements
histograms[blockIdx.x * TOTALBINS] ... histograms[(blockIdx.x + 1) * TOTALBINS]
The result will be stored in normHistograms[blockIdx.x * TOTALBINS] ...
normHistograms[(blockIdx.x + 1) * TOTALBINS]
Inputs:
histograms - array that holds the histogram to be normalized
histSz - size of the input histogram (sum of its bins)
Outputs:
normHistograms - array to hold the normalized histogram
*/
__device__ void normalizeHist(float * normHistograms, int * histograms, int histSz)
{
//compute the normalized histogram using cyclic partitioning
int tdx = threadIdx.x;
int * histogramTile = &histograms[blockIdx.x * TOTALBINS];
float * normHistogramTile = &normHistograms[blockIdx.x * TOTALBINS];
while (tdx < TOTALBINS)
{
normHistogramTile[tdx] = (histogramTile[tdx]/(float)histSz) * NORMMAX;
tdx += blockDim.x;
}
}
//this can be used for debugging
__device__ void printFloatArray(float * array, int startIdx, int length)
{
int i, j = 0;
for (i = startIdx; i < startIdx + length; i++, j++)
{
if ((j % 16) == 0) printf("\n%3d: ", i);
printf("%6.1f ", array[i]);
}
}
//launched to get more accurate timing
__global__ void emptyKernel()
{
}
| 5560bb95817e41553a4b18f9a6ab96838ecf40d2.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "config.h"
#include "histogram.h"
#include "d_classify.h"
#include "CHECK.h"
#include "wrappers.h"
#define CLASSBLOCKDIM 1024
static __device__ void computeHistSz(int *, int *);
//parameters for building the histogram from the image
//TILEWIDTH is number of pixels in a row that a single thread will handle
#define TILEWIDTH 8
#define HISTBLOCKDIM 32
//prototypes for functions local to this file
static float histogramOnGPU(histogramT *, unsigned char *, int, int, int);
static float classifyOnGPU(float *, int *, int modelCt);
//prototypes for the kernels
static __global__ void d_histoKernel(histogramT *, unsigned char *, int, int, int);
static __global__ void d_classifyKernel(float *, float *, int *);
static __global__ void emptyKernel();
//prototypes of functions called by d_classifyKernel
static __device__ void normalizeHist(float *, int *, int);
static __device__ void intersection(float * normHistograms, float * intersect);
//for debugging
static __device__ void printFloatArray(float * array, int startIdx, int length);
__device__ void printIntArray(int * data, int length);
/*
d_classify
Performs image classification on the GPU by first building a histogram
to represent the image and then comparing the histogram to each of the
histogram models.
Outputs:
Phisto - pointer to histogramT struct containing the bins
dresult - comparisonT array of structs; one element per model
Inputs:
models - an array of pointers to histogramT structs; one element per
model to be compared to the input
Pin - array contains the color pixels of the image to be used for
building a histogram and doing the classification
width and height - dimensions of the image
pitch - size of each row
Returns the amount of time it takes to build the histogram and
classify the image
*/
float d_classify(histogramT * Phisto, comparisonT * dresult,
histogramT ** models, int modelCt, unsigned char * Pin,
int height, int width, int pitch)
{
float gpuMsecTime1, gpuMsecTime2;
//launch an empty kernel to get more accurate timing
emptyKernel<<<1024, 1024>>>();
//build a histogram of the input image
gpuMsecTime1 = histogramOnGPU(Phisto, Pin, height, width, pitch);
//allocate array to hold all histograms, including the histogram for the input
int * histograms = (int *) Malloc(sizeof(int) * (modelCt + 1) * TOTALBINS);
//copy the histogram for the input to the beginning of the array
memcpy(histograms, Phisto->histogram, sizeof(int) * TOTALBINS);
//copy the remaining histograms
for (int i = 1; i <= modelCt; i++)
memcpy(&histograms[i*TOTALBINS], models[i - 1]->histogram, sizeof(int) * TOTALBINS);
//allocate an array of floats to hold the comparisons
float * comparisons = (float *) Malloc(sizeof(int) * modelCt);
//perform the classification
gpuMsecTime2 = classifyOnGPU(comparisons, histograms, modelCt);
//copy the results into the output
for (int i = 0; i < modelCt; i++)
{
dresult[i].comparison = comparisons[i];
strncpy(dresult[i].fileName, models[i]->fileName, NAMELEN);
}
return gpuMsecTime1 + gpuMsecTime2;
}
/*
histogramOnGPU
Builds a histogram to represent the input image.
Outputs:
Phisto - pointer to the histogramT struct containing the bins
Inputs:
Pin - array contains the color pixels of the image to be used for
building a histogram
width and height - dimensions of the image
pitch - size of each row
Returns the amount of time it takes to build the histogram
*/
float histogramOnGPU(histogramT * Phisto, unsigned char * Pin, int height,
int width, int pitch)
{
//THIS CODE IS COMPLETE
cudaEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
//Use cuda functions to do the timing
//create event objects
CHECK(cudaEventCreate(&start_gpu));
CHECK(cudaEventCreate(&stop_gpu));
unsigned char * d_Pin;
int numPinBytes = sizeof(unsigned char) * pitch * height * CHANNELS;
histogramT * d_Phisto;
//create the array on the GPU to hold input
CHECK(cudaMalloc((void **)&d_Pin, numPinBytes));
CHECK(cudaMemcpy(d_Pin, Pin, numPinBytes, cudaMemcpyHostToDevice));
//create the array on the GPU to hold the histogram
CHECK(cudaMalloc((void **)&d_Phisto, sizeof(histogramT)));
CHECK(cudaMemcpy(d_Phisto, Phisto, sizeof(histogramT),
cudaMemcpyHostToDevice));
//build the histogram
CHECK(cudaEventRecord(start_gpu));
//each thread calculates TILEWIDTH elements in a row
dim3 grid(ceil(width/(float)(HISTBLOCKDIM * TILEWIDTH)),
ceil(height/(float)HISTBLOCKDIM), 1);
dim3 block(HISTBLOCKDIM, HISTBLOCKDIM, 1);
d_histoKernel<<<grid, block>>>(d_Phisto, d_Pin, height, width, pitch);
CHECK(cudaEventRecord(stop_gpu));
CHECK(cudaMemcpy(Phisto, d_Phisto, sizeof(histogramT),
cudaMemcpyDeviceToHost));
//record the ending time and wait for event to complete
CHECK(cudaEventSynchronize(stop_gpu));
//calculate the elapsed time between the two events
CHECK(cudaEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
return gpuMsecTime;
}
/*
d_histoKernel
Kernel code executed by each thread on its own data when the kernel is
launched. Each thread operates on TILEWIDTH pixels in a row.
Inputs:
Pin - array contains the color pixels to be used to build the histogram
width and height - dimensions of the image
pitch - size of each row
Output:
histo - pointer to a histogramT struct that contains an array of bins
*/
__global__
void d_histoKernel(histogramT * histo, unsigned char * Pin, int height,
int width, int pitch)
{
//THIS CODE IS COMPLETE. You can replace it with a faster version
//if you like, but the shared memory version won't work with all
//TOTALBINS sizes. If you use that one, the largest BIN value can
//only be 8.
int colStart = (blockIdx.x * blockDim.x + threadIdx.x) * TILEWIDTH;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col;
//use a privatization technique to reduce the number of atomic adds
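    //(consecutive pixels in a row often fall in the same bin, so counts are
    // accumulated in a register and flushed with one atomicAdd per bin change)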
int accumulator = 0;
int prevBin = -1;
int currBin;
//go through each pixel in the tile
for (int i = 0; i < TILEWIDTH; i++)
{
col = colStart + i;
if (row < height && col < width)
{
//flatten the 2D indices
int pIndx = row * CHANNELS * pitch + col * CHANNELS;
unsigned char redVal = Pin[pIndx];
unsigned char greenVal = Pin[pIndx + 1];
unsigned char blueVal = Pin[pIndx + 2];
currBin = (redVal/TONESPB)*BINS*BINS + (blueVal/TONESPB)*BINS
+ greenVal/TONESPB;
if (currBin != prevBin)
{
if (accumulator > 0)
atomicAdd(&(histo->histogram[prevBin]), accumulator);
prevBin = currBin;
accumulator = 1;
} else accumulator++;
}
}
if (accumulator > 0)
{
atomicAdd(&(histo->histogram[prevBin]), accumulator);
}
}
/*
classifyOnGPU
Performs image classification on the GPU
Outputs:
comparisons - an array of size modelCt. comparisons[i] is set to the
result of comparing the input image to model i
The size of this array is modelCt.
Inputs:
histograms - an array of histograms.
The histogram for the input image is in:
histograms[0] ... histogram[TOTALBINS - 1]
The histogram for model 0 is in:
histograms[TOTALBINS] ... histogram[2*TOTALBINS - 1]
The histogram for the last model is in:
histograms[modelCt*TOTALBINS] ... histogram[modelCt*TOTALBINS - 1]
Thus, note that the array contains the input histogram and the
model histograms and thus is of size (modelCt + 1) * TOTALBINS
modelCt - count of the number of models used for the classification
Returns the amount of time it takes to classify the image
*/
float classifyOnGPU(float * comparisons, int * histograms, int modelCt)
{
cudaEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
    //allocate a float array on the GPU to hold the normalized histograms.
    //It needs to be big enough to hold the histogram of the input image
    //and the histograms of all of the models.
float * normHistograms;
CHECK(cudaMalloc((void **)&normHistograms, sizeof(float) * TOTALBINS * (modelCt + 1)));
int * dhistograms;
//allocate an int array on the GPU to hold the original histograms
//It needs to be big enough to hold the histogram of the input image and
    //the histograms of all of the models.
CHECK(cudaMalloc((void **)&dhistograms, sizeof(float) * TOTALBINS * (modelCt + 1)));
//copy input histograms into dhistograms
CHECK(cudaMemcpy(dhistograms, histograms, sizeof(int) * TOTALBINS * (modelCt + 1),
cudaMemcpyHostToDevice));
float * dcomparisons;
//allocate a float array on the GPU to hold the comparisons
//there needs to be one element per model
CHECK(cudaMalloc((void **)&dcomparisons, sizeof(float) * modelCt ));
//Use cuda functions to do the timing
//create event objects
CHECK(cudaEventCreate(&start_gpu));
CHECK(cudaEventCreate(&stop_gpu));
//record the starting time
CHECK(cudaEventRecord(start_gpu));
//each model is handled by a single block of threads
//an extra block of threads is needed to normalize the input histogram
dim3 grid(modelCt + 1, 1, 1);
//don't make block any larger than the number of bins
dim3 block(min(TOTALBINS, CLASSBLOCKDIM), 1);
d_classifyKernel<<<grid, block>>>(dcomparisons, normHistograms, dhistograms);
CHECK(cudaEventRecord(stop_gpu));
//copy the device comparison array into the host comparison array
CHECK(cudaMemcpy(comparisons, dcomparisons, sizeof(float) * modelCt,
cudaMemcpyDeviceToHost));
//record the ending time and wait for event to complete
CHECK(cudaEventSynchronize(stop_gpu));
//calculate the elapsed time between the two events
CHECK(cudaEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
return gpuMsecTime;
}
/*
d_classifyKernel
Kernel used to do the image classification on the GPU. Each block of
threads normalizes a single histogram. After that, every block except
for block 0 will perform the intersection and store a
result in the comparisons array.
Thus, each block (except for 0) produces one result for the comparisons
array. Each thread in a block handles TOTALBINS/blockDim.x elements
Inputs:
histograms - array of size gridDim.x * TOTALBINS. It contains
gridDim.x histograms each of size TOTALBINS. The first one
is the input histogram.
Outputs:
comparisons - comparison[i] is set to the value of the comparison of the
input histogram and the histogram of model i; for example,
comparison[0] is set to comparison of the input and model 0.
normHistograms - array of size gridDim.x * TOTALBINS. It contains
gridDim.x histograms that are equal to the normalization
of the input histograms.
*/
__device__ int blockSync = 0; //need this to provide synchronization among blocks
__global__ void d_classifyKernel(float * comparisons, float * normHistograms, int * histograms)
{
__shared__ int histSz;
__shared__ float intersect;
//thread 0 in the block should initialize histSz and intersect to 0
if (threadIdx.x == 0)
{
intersect = 0;
histSz = 0;
}
__syncthreads();
computeHistSz(histograms, &histSz);
__syncthreads();
/*
if (threadIdx.x == 0)
{
printf("%d\n", histSz);
}
*/
//normalize the histogram
normalizeHist(normHistograms, histograms, histSz);
__syncthreads();
//after block 0 has finished computing the normalized histogram,
//one thread in its block should set blockSync to 1 so other blocks can
//then proceed to compute the intersection
if (blockIdx.x == 0 && threadIdx.x == 0)
{
atomicAdd(&blockSync, 1);
__threadfence();
}
else if (blockIdx.x > 0)
{
//if not a block 0 thread, wait until blockSync is no longer 0 before
//continuing (page 193 has logic similar to what has to be done here)
while (atomicAdd(&blockSync, 0) == 0);
//compute the intersection
intersection(normHistograms, &intersect);
__syncthreads();
//one thread in all blocks except 0 should store the fractional intersect
//value in the comparisons array
if (threadIdx.x == 0)
{
//printf("Intersect: %f, NORMMAX: %d, Storing %f in comparisons[%d]\n",
// intersect, NORMMAX, intersect/NORMMAX, blockIdx.x - 1);
comparisons[blockIdx.x - 1] = intersect/NORMMAX;
}
}
}
/*
intersection
Calculates the intersection of the input histogram and a model histogram
after they have been normalized.
The input histogram is in normHistograms[0] ... normHistograms[TOTALBINS - 1]
The model histogram is in normHistograms[TOTALBINS * blockIdx.x] ...
normHistograms[TOTALBINS * blockIdx.x - 1]
Inputs:
normHistograms - array of TOTALBINS * gridDim.x bins (gridDim.x histograms)
intersect - pointer to the shared intersect value
Outputs:
shared intersect variable is incremented by the intersection calculated by the
thread running this code
*/
__device__ void intersection(float * normHistograms, float * intersect)
{
//compute intersection using cyclic partitioning
float * normHistogramTile = &normHistograms[blockIdx.x * TOTALBINS];
int tdx = threadIdx.x;
while (tdx < TOTALBINS)
{
float minTwo = fmin(normHistogramTile[tdx], normHistograms[tdx]);
atomicAdd(intersect, minTwo);
tdx += blockDim.x;
}
}
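//The value accumulated above is the classic histogram intersection
//  intersect = sum over i of min(normModel[i], normInput[i]).
//Because every normalized histogram sums to NORMMAX, intersect/NORMMAX (the value
//d_classifyKernel stores in comparisons[]) lies in [0,1], reaching 1 only when the
//two histograms are identical.  A minimal host-side sketch of the same computation
//(assuming normModel/normInput each hold one normalized histogram) would be:
//  float ref = 0;
//  for (int i = 0; i < TOTALBINS; i++) ref += fminf(normModel[i], normInput[i]);
//  // ref/NORMMAX should match the corresponding comparisons[] entry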
__device__ void printIntArray(int * data, int length)
{
int i, j = 0;
for (i = 0; i < length; i++, j++)
{
if ((j % 10) == 0) printf("\n%3d: ", i);
printf("%5d ", data[i]);
}
}
/*
computeHistSz
Calculates the size of a histogram by adding up all of the bin
values. The histogram to be used for the calculation is in elements
   histograms[blockIdx.x * TOTALBINS] ... histograms[(blockIdx.x + 1) * TOTALBINS - 1]
Inputs:
histograms - array of TOTALBINS * gridDim.x bins (gridDim.x histograms)
histSz - pointer to the shared histogram size variable
Outputs:
shared histogram size variable is incremented by the size calculated by
the thread running this code
*/
__device__ void computeHistSz(int * histograms, int * histSz)
{
//compute histogram size (sum of bins) using cyclic partitioning
int tdx = threadIdx.x;
int * histogramTile = &histograms[blockIdx.x * TOTALBINS];
while (tdx < TOTALBINS)
{
atomicAdd(histSz, histogramTile[tdx]);
tdx += blockDim.x;
}
}
/*
normalizeHist
Normalizes the histogram so that every bin value is between 0 and NORMMAX.
The histogram to be normalized is in elements
   histograms[blockIdx.x * TOTALBINS] ... histograms[(blockIdx.x + 1) * TOTALBINS - 1]
   The result will be stored in normHistograms[blockIdx.x * TOTALBINS] ...
   normHistograms[(blockIdx.x + 1) * TOTALBINS - 1]
Inputs:
histograms - array that holds the histogram to be normalized
histSz - size of the input histogram (sum of its bins)
Outputs:
normHistograms - array to hold the normalized histogram
*/
__device__ void normalizeHist(float * normHistograms, int * histograms, int histSz)
{
//compute the normalized histogram using cyclic partitioning
int tdx = threadIdx.x;
int * histogramTile = &histograms[blockIdx.x * TOTALBINS];
float * normHistogramTile = &normHistograms[blockIdx.x * TOTALBINS];
while (tdx < TOTALBINS)
{
normHistogramTile[tdx] = (histogramTile[tdx]/(float)histSz) * NORMMAX;
tdx += blockDim.x;
}
}
//this can be used for debugging
__device__ void printFloatArray(float * array, int startIdx, int length)
{
int i, j = 0;
for (i = startIdx; i < startIdx + length; i++, j++)
{
if ((j % 16) == 0) printf("\n%3d: ", i);
printf("%6.1f ", array[i]);
}
}
//launched to get more accurate timing
__global__ void emptyKernel()
{
}
|
7abfa661fd8fce71656a069949d4a869350e838a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include <stdio.h>
#define blockMax 500
__global__ void MatrixMulKernel(const double* A, const double* B, double* C, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<N)
C[i] = A[i] * B[i];
}
__global__ void SigmoidKernel(double* A, double* B, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
B[i] = 1 / (1 + exp10(-A[i]));
}
}
__global__ void DsigmoidKernel(double* A, double* B, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
double a = 1 + exp10(-A[i]);
B[i] = (a - 1) / (a*a);
}
}
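// Note: these kernels use exp10, so the "sigmoid" implemented here is
// s(x) = 1/(1 + 10^(-x)).  With a = 1 + 10^(-x), DsigmoidKernel returns
// (a-1)/a^2 = 10^(-x)/(1 + 10^(-x))^2, which is ds/dx up to the constant factor
// ln(10).  Swap exp10 for exp in both kernels if the standard logistic function
// 1/(1 + e^(-x)) and its exact derivative are intended.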
hipError_t cuda_hadamardProduct(const double *A, const double *B, double *R, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
MatrixMulKernel << < blockNum, blockMax >> >(A, B, R, size);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return cudaStatus;
}
return cudaStatus;
}
int cuda_dsigmoid(double *A, double *B, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
DsigmoidKernel << < blockNum, blockMax >> >(A, B, size);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return 1;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return 1;
}
return 0;
}
int cuda_sigmoid(double *A, double *B, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
SigmoidKernel << < blockNum, blockMax >> >(A, B, size);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return 1;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return 1;
}
return 0;
}
| 7abfa661fd8fce71656a069949d4a869350e838a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include <stdio.h>
#define blockMax 500
__global__ void MatrixMulKernel(const double* A, const double* B, double* C, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i<N)
C[i] = A[i] * B[i];
}
__global__ void SigmoidKernel(double* A, double* B, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
B[i] = 1 / (1 + exp10(-A[i]));
}
}
__global__ void DsigmoidKernel(double* A, double* B, int N)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
double a = 1 + exp10(-A[i]);
B[i] = (a - 1) / (a*a);
}
}
cudaError_t cuda_hadamardProduct(const double *A, const double *B, double *R, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
MatrixMulKernel << < blockNum, blockMax >> >(A, B, R, size);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return cudaStatus;
}
return cudaStatus;
}
int cuda_dsigmoid(double *A, double *B, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
DsigmoidKernel << < blockNum, blockMax >> >(A, B, size);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return 1;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return 1;
}
return 0;
}
int cuda_sigmoid(double *A, double *B, unsigned int size)
{
int blockNum = (size + blockMax - 1) / blockMax;
SigmoidKernel << < blockNum, blockMax >> >(A, B, size);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return 1;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return 1;
}
return 0;
}
|
97d04abf2def30eb1690af6aa0bf46c4f6827e7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Monte Carlo simulation of Ising model on 2D lattice
// using Metropolis algorithm
// using checkerboard (even-odd) update
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h> // for strcpy/strcat used below to build output file names
#include <gsl/gsl_rng.h>
gsl_rng *rng=NULL; // pointer to gsl_rng random number generator
void exact_2d(double, double, double*, double*);
void rng_MT(float*, int);
double ellf(double phi, double ak);
double rf(double x, double y, double z);
double min(double x, double y, double z);
double max(double x, double y, double z);
int *spin; // host spin variables
int *d_spin; // device spin variables
float *h_rng; // host random numbers
float *d_rng; // device random numbers
__constant__ int fw[1000],bw[1000]; // declare constant memory for fw, bw
__global__ void metro_gmem_odd(int* spin, float *ranf, const float B, const float T)
{
int x, y, parity;
int i, io;
int old_spin, new_spin, spins;
int k1, k2, k3, k4;
float de;
// thread index in a block of size (tx,ty) corresponds to
// the index ie/io of the lattice with size (2*tx,ty)=(Nx,Ny).
// tid = threadIdx.x + threadIdx.y*blockDim.x = ie or io
int Nx = 2*blockDim.x; // block size before even-odd reduction
int nx = 2*blockDim.x*gridDim.x; // number of sites in x-axis of the entire lattice
// next, go over the odd sites
io = threadIdx.x + threadIdx.y*blockDim.x;
x = (2*io)%Nx;
y = ((2*io)/Nx)%Nx;
parity=(x+y+1)%2;
x = x + parity;
// add the offsets to get its position in the full lattice
x += Nx*blockIdx.x;
y += blockDim.y*blockIdx.y;
i = x + y*nx;
old_spin = spin[i];
new_spin = -old_spin;
k1 = fw[x] + y*nx; // right
k2 = x + fw[y]*nx; // top
k3 = bw[x] + y*nx; // left
k4 = x + bw[y]*nx; // bottom
spins = spin[k1] + spin[k2] + spin[k3] + spin[k4];
de = -(new_spin - old_spin)*(spins + B);
if((de <= 0.0) || (ranf[i] < exp(-de/T))) {
spin[i] = new_spin; // accept the new spin;
}
__syncthreads();
}
__global__ void metro_gmem_even(int* spin, float *ranf, const float B, const float T)
{
int x, y, parity;
int i, ie;
int old_spin, new_spin, spins;
int k1, k2, k3, k4;
float de;
// thread index in a block of size (tx,ty) corresponds to
// the index ie/io of the lattice with size (2*tx,ty)=(Nx,Ny).
// tid = threadIdx.x + threadIdx.y*blockDim.x = ie or io
int Nx = 2*blockDim.x; // block size before even-odd reduction
int nx = 2*blockDim.x*gridDim.x; // number of sites in x-axis of the entire lattice
// first, go over the even sites
ie = threadIdx.x + threadIdx.y*blockDim.x;
x = (2*ie)%Nx;
y = ((2*ie)/Nx)%Nx;
parity=(x+y)%2;
x = x + parity;
// add the offsets to get its position in the full lattice
x += Nx*blockIdx.x;
y += blockDim.y*blockIdx.y;
i = x + y*nx;
old_spin = spin[i];
new_spin = -old_spin;
k1 = fw[x] + y*nx; // right
k2 = x + fw[y]*nx; // top
k3 = bw[x] + y*nx; // left
k4 = x + bw[y]*nx; // bottom
spins = spin[k1] + spin[k2] + spin[k3] + spin[k4];
de = -(new_spin - old_spin)*(spins + B);
if((de <= 0.0) || (ranf[i] < exp(-de/T))) {
spin[i] = new_spin; // accept the new spin;
}
__syncthreads();
}
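// Checkerboard (even/odd) update: each lattice site's four nearest neighbours have
// the opposite parity, so all even sites can be updated concurrently without any
// thread reading a spin that another thread is flipping, and likewise for the odd
// sites.  Each kernel applies the Metropolis rule to one sublattice: the energy
// change of a flip is de = 2*old_spin*(sum of the four neighbours + B), and the
// flip is accepted when de <= 0 or when the pre-generated uniform random number
// ranf[i] is below exp(-de/T).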
int main(void) {
int nx,ny; // # of sites in x and y directions respectively
int ns; // ns = nx*ny, total # of sites
int *ffw; // forward index
int *bbw; // backward index
int nt; // # of sweeps for thermalization
int nm; // # of measurements
int im; // interval between successive measurements
int nd; // # of sweeps between displaying results
int nb; // # of sweeps before saving spin configurations
int sweeps; // total # of sweeps at each temperature
int k1, k2; // right, top
int istart; // istart = (0: cold start/1: hot start)
double T; // temperature
double B; // external magnetic field
double energy; // total energy of the system
double mag; // total magnetization of the system
double te; // accumulator for energy
double tm; // accumulator for mag
double count; // counter for # of measurements
double M; // magnetization per site, < M >
double E; // energy per site, < E >
double E_ex; // exact solution of < E >
double M_ex; // exact solution of < M >
int gid; // GPU_ID
float gputime;
float flops;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n",gid);
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
err = hipSetDevice(gid);
if(err != hipSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Select GPU with device ID = %d\n", gid);
hipSetDevice(gid);
printf("Ising Model on 2D Square Lattice with p.b.c.\n");
printf("============================================\n");
printf("Initialize the RNG\n");
rng = gsl_rng_alloc(gsl_rng_mt19937);
printf("Enter the seed:\n");
long seed;
scanf("%ld",&seed);
printf("%ld\n",seed);
gsl_rng_set(rng,seed);
printf("The RNG has been initialized\n");
printf("Enter the number of sites in each dimension (<= 1000)\n");
scanf("%d",&nx);
printf("%d\n",nx);
ny=nx;
ns=nx*ny;
ffw = (int*)malloc(nx*sizeof(int));
bbw = (int*)malloc(nx*sizeof(int));
for(int i=0; i<nx; i++) {
ffw[i]=(i+1)%nx;
bbw[i]=(i-1+nx)%nx;
}
hipMemcpyToSymbol(fw, ffw, nx*sizeof(int)); // copy to constant memory
hipMemcpyToSymbol(bw, bbw, nx*sizeof(int));
spin = (int*)malloc(ns*sizeof(int)); // host spin variables
h_rng = (float*)malloc(ns*sizeof(float)); // host random numbers
printf("Enter the # of sweeps for thermalization\n");
scanf("%d",&nt);
printf("%d\n",nt);
printf("Enter the # of measurements\n");
scanf("%d",&nm);
printf("%d\n",nm);
printf("Enter the interval between successive measurements\n");
scanf("%d",&im);
printf("%d\n",im);
printf("Enter the display interval\n");
scanf("%d",&nd);
printf("%d\n",nd);
printf("Enter the interval for saving spin configuration\n");
scanf("%d",&nb);
printf("%d\n",nb);
printf("Enter the temperature (in units of J/k)\n");
scanf("%lf",&T);
printf("%lf\n",T);
printf("Enter the external magnetization\n");
scanf("%lf",&B);
printf("%lf\n",B);
printf("Initialize spins configurations :\n");
printf(" 0: cold start \n");
printf(" 1: hot start \n");
scanf("%d",&istart);
printf("%d\n",istart);
// Set the number of threads (tx,ty) per block
int tx,ty;
printf("Enter the number of threads (tx,ty) per block: ");
printf("For even/odd updating, tx=ty/2 is assumed: ");
scanf("%d %d",&tx, &ty);
printf("%d %d\n",tx, ty);
if(2*tx != ty) exit(0);
if(tx*ty > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(0);
}
dim3 threads(tx,ty);
// The total number of threads in the grid is equal to (nx/2)*ny = ns/2
int bx = nx/tx/2;
if(bx*tx*2 != nx) {
printf("The block size in x is incorrect\n");
exit(0);
}
int by = ny/ty;
if(by*ty != ny) {
printf("The block size in y is incorrect\n");
exit(0);
}
if((bx > 65535)||(by > 65535)) {
printf("The grid size exceeds the limit ! \n");
exit(0);
}
dim3 blocks(bx,by);
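  // Each block of tx*ty threads owns a (2*tx) x ty tile of the lattice: the even
  // kernel updates the tile's even sites and the odd kernel its odd sites, so one
  // even+odd kernel pair completes a full Metropolis sweep.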
printf("The dimension of the grid is (%d, %d)\n",bx,by);
if(istart == 0) {
for(int j=0; j<ns; j++) { // cold start
spin[j] = 1;
}
}
else {
for(int j=0; j<ns; j++) { // hot start
if(gsl_rng_uniform(rng) > 0.5) {
spin[j] = 1;
}
else {
spin[j] = -1;
}
}
}
  char fstr[64]; // large enough for the full "./dats/..." path (the original 20 bytes overflowed)
  char f3str[64];
  char buf[8];
  strcpy(fstr, "./dats/ising2d_1gpu_T_");
  gcvt(T, 2, buf);
  strcat(fstr, buf);
  strcat(fstr, ".dat");
  printf("%s\n", fstr);
  strcpy(f3str, "./dats/spin_1gpu_T_");
  strcat(f3str, buf);
  strcat(f3str, ".dat");
  printf("%s\n", f3str);
FILE *output;
output = fopen(fstr,"w");
FILE *output3;
output3 = fopen(f3str,"w");
// Allocate vectors in device memory
hipMalloc((void**)&d_spin, ns*sizeof(int)); // device spin variables
hipMalloc((void**)&d_rng, ns*sizeof(float)); // device random numbers
// Copy vectors from host memory to device memory
hipMemcpy(d_spin, spin, ns*sizeof(int), hipMemcpyHostToDevice);
if(B == 0.0) {
exact_2d(T,B,&E_ex,&M_ex);
fprintf(output,"T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
printf("T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
}
else {
fprintf(output,"T=%.5e B=%.5e ns=%d\n", T, B, ns);
printf("T=%.5e B=%.5e ns=%d\n", T, B, ns);
}
fprintf(output," E M \n");
fprintf(output,"--------------------------\n");
printf("Thermalizing\n");
printf("sweeps < E > < M >\n");
printf("---------------------------------\n");
fflush(stdout);
te=0.0; // initialize the accumulators
tm=0.0;
count=0.0;
sweeps=nt+nm*im; // total # of sweeps
// create the timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//start the timer
hipEventRecord(start,0);
for(int swp=0; swp<nt; swp++) { // thermalization
rng_MT(h_rng, ns); // generate ns random numbers
hipMemcpy(d_rng, h_rng, ns*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( metro_gmem_even), dim3(blocks),dim3(threads), 0, 0, d_spin, d_rng, B, T); // updating with Metropolis algorithm
hipLaunchKernelGGL(( metro_gmem_odd), dim3(blocks),dim3(threads), 0, 0, d_spin, d_rng, B, T); // updating with Metropolis algorithm
}
for(int swp=nt; swp<sweeps; swp++) {
rng_MT(h_rng, ns); // generate ns random numbers
hipMemcpy(d_rng, h_rng, ns*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( metro_gmem_even), dim3(blocks),dim3(threads), 0, 0, d_spin, d_rng, B, T);
hipLaunchKernelGGL(( metro_gmem_odd), dim3(blocks),dim3(threads), 0, 0, d_spin, d_rng, B, T);
int k;
if(swp%im == 0) {
hipMemcpy(spin, d_spin, ns*sizeof(int), hipMemcpyDeviceToHost);
mag=0.0;
energy=0.0;
for(int j=0; j<ny; j++) {
for(int i=0; i<nx; i++) {
k = i + j*nx;
k1 = ffw[i] + j*nx;
k2 = i + ffw[j]*nx;
mag = mag + spin[k]; // total magnetization;
energy = energy - spin[k]*(spin[k1] + spin[k2]); // total bond energy;
}
}
energy = energy - B*mag;
te = te + energy;
tm = tm + mag;
count = count + 1.0;
fprintf(output, "%.5e %.5e\n", energy/(double)ns, mag/(double)ns); // save the raw data
}
if(swp%nd == 0) {
E = te/(count*(double)(ns));
M = tm/(count*(double)(ns));
printf("%d %.5e %.5e\n", swp, E, M);
}
if(swp%nb == 0) {
hipMemcpy(spin, d_spin, ns*sizeof(int), hipMemcpyDeviceToHost);
fprintf(output3,"swp = %d, spin configuration:\n",swp);
for(int j=nx-1;j>-1;j--) {
for(int i=0; i<nx; i++) {
fprintf(output3,"%d ",spin[i+j*nx]);
}
fprintf(output3,"\n");
}
fprintf(output3,"\n");
}
}
fclose(output);
fclose(output3);
printf("---------------------------------\n");
if(B == 0.0) {
printf("T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
}
else {
printf("T=%.5e B=%.5e ns=%d\n", T, B, ns);
}
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
flops = 7.0*nx*nx*sweeps;
printf("GPU Gflops: %lf\n",flops/(1000000.0*gputime));
// destroy the timer
hipEventDestroy(start);
hipEventDestroy(stop);
gsl_rng_free(rng);
hipFree(d_spin);
hipFree(d_rng);
free(spin);
free(h_rng);
return 0;
}
// Exact solution of 2d Ising model on the infinite lattice
void exact_2d(double T, double B, double *E, double *M)
{
double x, y;
double z, Tc, K, K1;
const double pi = acos(-1.0);
K = 2.0/T;
if(B == 0.0) {
Tc = -2.0/log(sqrt(2.0) - 1.0); // critical temperature;
if(T > Tc) {
*M = 0.0;
}
else if(T < Tc) {
z = exp(-K);
*M = pow(1.0 + z*z,0.25)*pow(1.0 - 6.0*z*z + pow(z,4),0.125)/sqrt(1.0 - z*z);
}
x = 0.5*pi;
y = 2.0*sinh(K)/pow(cosh(K),2);
K1 = ellf(x, y);
*E = -1.0/tanh(K)*(1. + 2.0/pi*K1*(2.0*pow(tanh(K),2) - 1.0));
}
else
printf("Exact solution is only known for B=0 !\n");
return;
}
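// For B = 0 the magnetization branch above is Onsager/Yang's exact result written
// in terms of z = exp(-K) = exp(-2/T): it is algebraically identical to
// M = (1 - sinh(2/T)^(-4))^(1/8) for T < Tc.  The energy uses the complete elliptic
// integral of the first kind, K1 = F(pi/2, k) with k = 2*sinh(K)/cosh(K)^2, in
// Onsager's expression for the internal energy per site.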
/*******
* ellf * Elliptic integral of the 1st kind
*******/
double ellf(double phi, double ak)
{
double ellf;
double s;
s=sin(phi);
ellf=s*rf(pow(cos(phi),2),(1.0-s*ak)*(1.0+s*ak),1.0);
return ellf;
}
double rf(double x, double y, double z)
{
double rf,ERRTOL,TINY,BIG,THIRD,C1,C2,C3,C4;
ERRTOL=0.08;
TINY=1.5e-38;
BIG=3.0e37;
THIRD=1.0/3.0;
C1=1.0/24.0;
C2=0.1;
C3=3.0/44.0;
C4=1.0/14.0;
double alamb,ave,delx,dely,delz,e2,e3,sqrtx,sqrty,sqrtz,xt,yt,zt;
if(min(x,y,z) < 0 || min(x+y,x+z,y+z) < TINY || max(x,y,z) > BIG) {
printf("invalid arguments in rf\n");
exit(1);
}
xt=x;
yt=y;
zt=z;
do {
sqrtx=sqrt(xt);
sqrty=sqrt(yt);
sqrtz=sqrt(zt);
alamb=sqrtx*(sqrty+sqrtz)+sqrty*sqrtz;
xt=0.25*(xt+alamb);
yt=0.25*(yt+alamb);
zt=0.25*(zt+alamb);
ave=THIRD*(xt+yt+zt);
delx=(ave-xt)/ave;
dely=(ave-yt)/ave;
delz=(ave-zt)/ave;
}
while (max(abs(delx),abs(dely),abs(delz)) > ERRTOL);
e2=delx*dely-pow(delz,2);
e3=delx*dely*delz;
rf=(1.0+(C1*e2-C2-C3*e3)*e2+C4*e3)/sqrt(ave);
return rf;
}
double min(double x, double y, double z)
{
double m;
m = (x < y) ? x : y;
m = (m < z) ? m : z;
return m;
}
double max(double x, double y, double z)
{
double m;
m = (x > y) ? x : y;
m = (m > z) ? m : z;
return m;
}
void rng_MT(float* data, int n) // RNG with uniform distribution in (0,1)
{
for(int i = 0; i < n; i++)
data[i] = (float) gsl_rng_uniform(rng);
}
| 97d04abf2def30eb1690af6aa0bf46c4f6827e7e.cu | // Monte Carlo simulation of Ising model on 2D lattice
// using Metropolis algorithm
// using checkerboard (even-odd) update
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h> // for strcpy/strcat used below to build output file names
#include <gsl/gsl_rng.h>
gsl_rng *rng=NULL; // pointer to gsl_rng random number generator
void exact_2d(double, double, double*, double*);
void rng_MT(float*, int);
double ellf(double phi, double ak);
double rf(double x, double y, double z);
double min(double x, double y, double z);
double max(double x, double y, double z);
int *spin; // host spin variables
int *d_spin; // device spin variables
float *h_rng; // host random numbers
float *d_rng; // device random numbers
__constant__ int fw[1000],bw[1000]; // declare constant memory for fw, bw
__global__ void metro_gmem_odd(int* spin, float *ranf, const float B, const float T)
{
int x, y, parity;
int i, io;
int old_spin, new_spin, spins;
int k1, k2, k3, k4;
float de;
// thread index in a block of size (tx,ty) corresponds to
// the index ie/io of the lattice with size (2*tx,ty)=(Nx,Ny).
// tid = threadIdx.x + threadIdx.y*blockDim.x = ie or io
int Nx = 2*blockDim.x; // block size before even-odd reduction
int nx = 2*blockDim.x*gridDim.x; // number of sites in x-axis of the entire lattice
// next, go over the odd sites
io = threadIdx.x + threadIdx.y*blockDim.x;
x = (2*io)%Nx;
y = ((2*io)/Nx)%Nx;
parity=(x+y+1)%2;
x = x + parity;
// add the offsets to get its position in the full lattice
x += Nx*blockIdx.x;
y += blockDim.y*blockIdx.y;
i = x + y*nx;
old_spin = spin[i];
new_spin = -old_spin;
k1 = fw[x] + y*nx; // right
k2 = x + fw[y]*nx; // top
k3 = bw[x] + y*nx; // left
k4 = x + bw[y]*nx; // bottom
spins = spin[k1] + spin[k2] + spin[k3] + spin[k4];
de = -(new_spin - old_spin)*(spins + B);
if((de <= 0.0) || (ranf[i] < exp(-de/T))) {
spin[i] = new_spin; // accept the new spin;
}
__syncthreads();
}
__global__ void metro_gmem_even(int* spin, float *ranf, const float B, const float T)
{
int x, y, parity;
int i, ie;
int old_spin, new_spin, spins;
int k1, k2, k3, k4;
float de;
// thread index in a block of size (tx,ty) corresponds to
// the index ie/io of the lattice with size (2*tx,ty)=(Nx,Ny).
// tid = threadIdx.x + threadIdx.y*blockDim.x = ie or io
int Nx = 2*blockDim.x; // block size before even-odd reduction
int nx = 2*blockDim.x*gridDim.x; // number of sites in x-axis of the entire lattice
// first, go over the even sites
ie = threadIdx.x + threadIdx.y*blockDim.x;
x = (2*ie)%Nx;
y = ((2*ie)/Nx)%Nx;
parity=(x+y)%2;
x = x + parity;
// add the offsets to get its position in the full lattice
x += Nx*blockIdx.x;
y += blockDim.y*blockIdx.y;
i = x + y*nx;
old_spin = spin[i];
new_spin = -old_spin;
k1 = fw[x] + y*nx; // right
k2 = x + fw[y]*nx; // top
k3 = bw[x] + y*nx; // left
k4 = x + bw[y]*nx; // bottom
spins = spin[k1] + spin[k2] + spin[k3] + spin[k4];
de = -(new_spin - old_spin)*(spins + B);
if((de <= 0.0) || (ranf[i] < exp(-de/T))) {
spin[i] = new_spin; // accept the new spin;
}
__syncthreads();
}
int main(void) {
int nx,ny; // # of sites in x and y directions respectively
int ns; // ns = nx*ny, total # of sites
int *ffw; // forward index
int *bbw; // backward index
int nt; // # of sweeps for thermalization
int nm; // # of measurements
int im; // interval between successive measurements
int nd; // # of sweeps between displaying results
int nb; // # of sweeps before saving spin configurations
int sweeps; // total # of sweeps at each temperature
int k1, k2; // right, top
int istart; // istart = (0: cold start/1: hot start)
double T; // temperature
double B; // external magnetic field
double energy; // total energy of the system
double mag; // total magnetization of the system
double te; // accumulator for energy
double tm; // accumulator for mag
double count; // counter for # of measurements
double M; // magnetization per site, < M >
double E; // energy per site, < E >
double E_ex; // exact solution of < E >
double M_ex; // exact solution of < M >
int gid; // GPU_ID
float gputime;
float flops;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n",gid);
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
err = cudaSetDevice(gid);
if(err != cudaSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Select GPU with device ID = %d\n", gid);
cudaSetDevice(gid);
printf("Ising Model on 2D Square Lattice with p.b.c.\n");
printf("============================================\n");
printf("Initialize the RNG\n");
rng = gsl_rng_alloc(gsl_rng_mt19937);
printf("Enter the seed:\n");
long seed;
scanf("%ld",&seed);
printf("%ld\n",seed);
gsl_rng_set(rng,seed);
printf("The RNG has been initialized\n");
printf("Enter the number of sites in each dimension (<= 1000)\n");
scanf("%d",&nx);
printf("%d\n",nx);
ny=nx;
ns=nx*ny;
ffw = (int*)malloc(nx*sizeof(int));
bbw = (int*)malloc(nx*sizeof(int));
for(int i=0; i<nx; i++) {
ffw[i]=(i+1)%nx;
bbw[i]=(i-1+nx)%nx;
}
cudaMemcpyToSymbol(fw, ffw, nx*sizeof(int)); // copy to constant memory
cudaMemcpyToSymbol(bw, bbw, nx*sizeof(int));
spin = (int*)malloc(ns*sizeof(int)); // host spin variables
h_rng = (float*)malloc(ns*sizeof(float)); // host random numbers
printf("Enter the # of sweeps for thermalization\n");
scanf("%d",&nt);
printf("%d\n",nt);
printf("Enter the # of measurements\n");
scanf("%d",&nm);
printf("%d\n",nm);
printf("Enter the interval between successive measurements\n");
scanf("%d",&im);
printf("%d\n",im);
printf("Enter the display interval\n");
scanf("%d",&nd);
printf("%d\n",nd);
printf("Enter the interval for saving spin configuration\n");
scanf("%d",&nb);
printf("%d\n",nb);
printf("Enter the temperature (in units of J/k)\n");
scanf("%lf",&T);
printf("%lf\n",T);
printf("Enter the external magnetization\n");
scanf("%lf",&B);
printf("%lf\n",B);
printf("Initialize spins configurations :\n");
printf(" 0: cold start \n");
printf(" 1: hot start \n");
scanf("%d",&istart);
printf("%d\n",istart);
// Set the number of threads (tx,ty) per block
int tx,ty;
printf("Enter the number of threads (tx,ty) per block: ");
printf("For even/odd updating, tx=ty/2 is assumed: ");
scanf("%d %d",&tx, &ty);
printf("%d %d\n",tx, ty);
if(2*tx != ty) exit(0);
if(tx*ty > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(0);
}
dim3 threads(tx,ty);
// The total number of threads in the grid is equal to (nx/2)*ny = ns/2
int bx = nx/tx/2;
if(bx*tx*2 != nx) {
printf("The block size in x is incorrect\n");
exit(0);
}
int by = ny/ty;
if(by*ty != ny) {
printf("The block size in y is incorrect\n");
exit(0);
}
if((bx > 65535)||(by > 65535)) {
printf("The grid size exceeds the limit ! \n");
exit(0);
}
dim3 blocks(bx,by);
printf("The dimension of the grid is (%d, %d)\n",bx,by);
if(istart == 0) {
for(int j=0; j<ns; j++) { // cold start
spin[j] = 1;
}
}
else {
for(int j=0; j<ns; j++) { // hot start
if(gsl_rng_uniform(rng) > 0.5) {
spin[j] = 1;
}
else {
spin[j] = -1;
}
}
}
  char fstr[64]; // large enough for the full "./dats/..." path (the original 20 bytes overflowed)
  char f3str[64];
  char buf[8];
  strcpy(fstr, "./dats/ising2d_1gpu_T_");
  gcvt(T, 2, buf);
  strcat(fstr, buf);
  strcat(fstr, ".dat");
  printf("%s\n", fstr);
  strcpy(f3str, "./dats/spin_1gpu_T_");
  strcat(f3str, buf);
  strcat(f3str, ".dat");
  printf("%s\n", f3str);
FILE *output;
output = fopen(fstr,"w");
FILE *output3;
output3 = fopen(f3str,"w");
// Allocate vectors in device memory
cudaMalloc((void**)&d_spin, ns*sizeof(int)); // device spin variables
cudaMalloc((void**)&d_rng, ns*sizeof(float)); // device random numbers
// Copy vectors from host memory to device memory
cudaMemcpy(d_spin, spin, ns*sizeof(int), cudaMemcpyHostToDevice);
if(B == 0.0) {
exact_2d(T,B,&E_ex,&M_ex);
fprintf(output,"T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
printf("T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
}
else {
fprintf(output,"T=%.5e B=%.5e ns=%d\n", T, B, ns);
printf("T=%.5e B=%.5e ns=%d\n", T, B, ns);
}
fprintf(output," E M \n");
fprintf(output,"--------------------------\n");
printf("Thermalizing\n");
printf("sweeps < E > < M >\n");
printf("---------------------------------\n");
fflush(stdout);
te=0.0; // initialize the accumulators
tm=0.0;
count=0.0;
sweeps=nt+nm*im; // total # of sweeps
// create the timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start the timer
cudaEventRecord(start,0);
for(int swp=0; swp<nt; swp++) { // thermalization
rng_MT(h_rng, ns); // generate ns random numbers
cudaMemcpy(d_rng, h_rng, ns*sizeof(float), cudaMemcpyHostToDevice);
metro_gmem_even<<<blocks,threads>>>(d_spin, d_rng, B, T); // updating with Metropolis algorithm
metro_gmem_odd<<<blocks,threads>>>(d_spin, d_rng, B, T); // updating with Metropolis algorithm
}
for(int swp=nt; swp<sweeps; swp++) {
rng_MT(h_rng, ns); // generate ns random numbers
cudaMemcpy(d_rng, h_rng, ns*sizeof(float), cudaMemcpyHostToDevice);
metro_gmem_even<<<blocks,threads>>>(d_spin, d_rng, B, T);
metro_gmem_odd<<<blocks,threads>>>(d_spin, d_rng, B, T);
int k;
if(swp%im == 0) {
cudaMemcpy(spin, d_spin, ns*sizeof(int), cudaMemcpyDeviceToHost);
mag=0.0;
energy=0.0;
for(int j=0; j<ny; j++) {
for(int i=0; i<nx; i++) {
k = i + j*nx;
k1 = ffw[i] + j*nx;
k2 = i + ffw[j]*nx;
mag = mag + spin[k]; // total magnetization;
energy = energy - spin[k]*(spin[k1] + spin[k2]); // total bond energy;
}
}
energy = energy - B*mag;
te = te + energy;
tm = tm + mag;
count = count + 1.0;
fprintf(output, "%.5e %.5e\n", energy/(double)ns, mag/(double)ns); // save the raw data
}
if(swp%nd == 0) {
E = te/(count*(double)(ns));
M = tm/(count*(double)(ns));
printf("%d %.5e %.5e\n", swp, E, M);
}
if(swp%nb == 0) {
cudaMemcpy(spin, d_spin, ns*sizeof(int), cudaMemcpyDeviceToHost);
fprintf(output3,"swp = %d, spin configuration:\n",swp);
for(int j=nx-1;j>-1;j--) {
for(int i=0; i<nx; i++) {
fprintf(output3,"%d ",spin[i+j*nx]);
}
fprintf(output3,"\n");
}
fprintf(output3,"\n");
}
}
fclose(output);
fclose(output3);
printf("---------------------------------\n");
if(B == 0.0) {
printf("T=%.5e B=%.5e ns=%d E_exact=%.5e M_exact=%.5e\n", T, B, ns, E_ex, M_ex);
}
else {
printf("T=%.5e B=%.5e ns=%d\n", T, B, ns);
}
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
flops = 7.0*nx*nx*sweeps;
printf("GPU Gflops: %lf\n",flops/(1000000.0*gputime));
// destroy the timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
gsl_rng_free(rng);
cudaFree(d_spin);
cudaFree(d_rng);
free(spin);
free(h_rng);
return 0;
}
// Exact solution of 2d Ising model on the infinite lattice
void exact_2d(double T, double B, double *E, double *M)
{
double x, y;
double z, Tc, K, K1;
const double pi = acos(-1.0);
K = 2.0/T;
if(B == 0.0) {
Tc = -2.0/log(sqrt(2.0) - 1.0); // critical temperature;
if(T > Tc) {
*M = 0.0;
}
else if(T < Tc) {
z = exp(-K);
*M = pow(1.0 + z*z,0.25)*pow(1.0 - 6.0*z*z + pow(z,4),0.125)/sqrt(1.0 - z*z);
}
x = 0.5*pi;
y = 2.0*sinh(K)/pow(cosh(K),2);
K1 = ellf(x, y);
*E = -1.0/tanh(K)*(1. + 2.0/pi*K1*(2.0*pow(tanh(K),2) - 1.0));
}
else
printf("Exact solution is only known for B=0 !\n");
return;
}
/*******
* ellf * Elliptic integral of the 1st kind
*******/
double ellf(double phi, double ak)
{
double ellf;
double s;
s=sin(phi);
ellf=s*rf(pow(cos(phi),2),(1.0-s*ak)*(1.0+s*ak),1.0);
return ellf;
}
double rf(double x, double y, double z)
{
double rf,ERRTOL,TINY,BIG,THIRD,C1,C2,C3,C4;
ERRTOL=0.08;
TINY=1.5e-38;
BIG=3.0e37;
THIRD=1.0/3.0;
C1=1.0/24.0;
C2=0.1;
C3=3.0/44.0;
C4=1.0/14.0;
double alamb,ave,delx,dely,delz,e2,e3,sqrtx,sqrty,sqrtz,xt,yt,zt;
if(min(x,y,z) < 0 || min(x+y,x+z,y+z) < TINY || max(x,y,z) > BIG) {
printf("invalid arguments in rf\n");
exit(1);
}
xt=x;
yt=y;
zt=z;
do {
sqrtx=sqrt(xt);
sqrty=sqrt(yt);
sqrtz=sqrt(zt);
alamb=sqrtx*(sqrty+sqrtz)+sqrty*sqrtz;
xt=0.25*(xt+alamb);
yt=0.25*(yt+alamb);
zt=0.25*(zt+alamb);
ave=THIRD*(xt+yt+zt);
delx=(ave-xt)/ave;
dely=(ave-yt)/ave;
delz=(ave-zt)/ave;
}
while (max(abs(delx),abs(dely),abs(delz)) > ERRTOL);
e2=delx*dely-pow(delz,2);
e3=delx*dely*delz;
rf=(1.0+(C1*e2-C2-C3*e3)*e2+C4*e3)/sqrt(ave);
return rf;
}
double min(double x, double y, double z)
{
double m;
m = (x < y) ? x : y;
m = (m < z) ? m : z;
return m;
}
double max(double x, double y, double z)
{
double m;
m = (x > y) ? x : y;
m = (m > z) ? m : z;
return m;
}
void rng_MT(float* data, int n) // RNG with uniform distribution in (0,1)
{
for(int i = 0; i < n; i++)
data[i] = (float) gsl_rng_uniform(rng);
}
|
f2106b6f8c3b3608f4e37638c61bf638cf5ca38e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
typedef double DT;
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
{
printf( "The file was not opened\n" );
}
else{
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
}
    if (fp) fclose(fp); // guard against fclose(NULL) when the open failed
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed, DT* f){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
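/*
   Ghost-zone ("pyramid") optimization: each block loads a full BLOCK_SIZE x BLOCK_SIZE
   tile into shared memory and advances it `iteration` time steps without touching
   global memory in between.  Every step invalidates a one-cell border of the tile,
   so only the inner (BLOCK_SIZE - 2*iteration)^2 cells are valid at the end; the host
   therefore tiles the grid with these smaller, non-overlapping output blocks (see
   smallBlockCol/smallBlockRow and EXPAND_RATE in run()).
*/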
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row,int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows, DT* f)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
DT *F_gpu;
hipMalloc((void **)&F_gpu, sizeof(DT) *2);
hipMemcpy(F_gpu, f, sizeof(DT) *2, hipMemcpyHostToDevice);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed,F_gpu);
}
hipMemcpy(f, F_gpu, sizeof(DT) *2, hipMemcpyDeviceToHost);
return dst;
}
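/*
   MatrixTemp[0] and MatrixTemp[1] act as ping-pong buffers: src and dst are swapped
   before each launch so every kernel reads the temperatures produced by the previous
   one without an extra copy.  The return value is the index of the buffer that holds
   the final result, so the caller knows which buffer to copy back to the host.
*/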
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
double t_start, t_end;
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
DT* f;
f = (DT*)malloc(2*sizeof(DT));
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(float)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
t_start = rtclock();
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows,f);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
printf("Ending simulation\n");
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
| f2106b6f8c3b3608f4e37638c61bf638cf5ca38e.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
typedef double DT;
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
{
printf( "The file was not opened\n" );
}
else{
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
}
    if (fp) fclose(fp); // guard against fclose(NULL) when the open failed
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed, DT* f){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row,int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows, DT* f)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
DT *F_gpu;
cudaMalloc((void **)&F_gpu, sizeof(DT) *2);
cudaMemcpy(F_gpu, f, sizeof(DT) *2, cudaMemcpyHostToDevice);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed,F_gpu);
}
cudaMemcpy(f, F_gpu, sizeof(DT) *2, cudaMemcpyDeviceToHost);
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
double t_start, t_end;
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
DT* f;
f = (DT*)malloc(2*sizeof(DT));
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
t_start = rtclock();
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows,f);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
printf("Ending simulation\n");
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
73cf4dd7801c15e2dfc93fd845c5312dea5b33dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <typeinfo>
#include <random>
#include <stdint.h>
#include <rocblas.h>
#define DEBUG
#include <gemm/dispatch.h>
#include <gemm/epilogue_function.h>
//#include "util/matrix.h"
#include "util/timer.h"
using namespace cutlass;
// for Windows testing
float drand48() {
return rand() / (RAND_MAX + 1.0);
}
int main(int argc, const char **argv) {
// consts
int m = 10240;
int k = 4096;
int n = 4096;
float alpha = 1.0;
float beta = 0.0;
int g_timing_iterations = 10;
static const int TransformA = 0;
static const int TransformB = 0;
hipStream_t stream = 0;
// definitions
float *A, *B, *C, *C2;
hipMallocManaged(&A, m*k * sizeof(float) );
hipMallocManaged(&B, k*n * sizeof(float) );
hipMallocManaged(&C, m*n * sizeof(float) );
hipMallocManaged(&C2, m*n * sizeof(float) );
// fill out
for( int jndex=0; jndex<k; jndex++ ) {
for(int index=0; index<m; index++) {
A[jndex*m + index] = drand48();
}
}
for( int jndex=0; jndex<n; jndex++ ) {
for(int index=0; index<k; index++) {
B[jndex*k + index] = drand48();
}
}
for( int jndex=0; jndex<n; jndex++ ) {
for(int index=0; index<m; index++) {
C[jndex*m + index] = 0;
C2[jndex*m + index] = 0;
}
}
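  // All matrices are stored column-major (element (row, col) lives at col*ld + row),
  // the layout hipBLAS/cuBLAS expect: A is m x k with leading dimension m, B is k x n
  // with leading dimension k, and C/C2 are m x n with leading dimension m.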
hipDeviceSynchronize();
// CUBLAS
hipblasHandle_t g_cublas_handle;
hipblasCreate(&g_cublas_handle);
gpu_timer timer;
for (int i = 0; i < g_timing_iterations+2; i++) {
if (i == 2) timer.start();
CUDA_PERROR(hipblasSgemm(
g_cublas_handle,
(hipblasOperation_t) TransformA,
(hipblasOperation_t) TransformB,
m,
n,
k,
&alpha,
A,
m,
B,
k,
&beta,
C,
m));
}
timer.stop();
// calculate CUBLAS time
int64_t num_flops = (2 * int64_t(m) * int64_t(n) * int64_t(k)) + (2 * int64_t(m) * int64_t(n));
double tcublas = timer.elapsed_millis() / g_timing_iterations;
double cublas_flops = double(num_flops) / tcublas / 1.0e6;
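  // GEMM cost model: 2*m*n*k flops for the multiply-adds plus 2*m*n for the
  // alpha/beta epilogue.  Dividing the flop count by the per-iteration time in
  // milliseconds and by 1.0e6 yields GFLOP/s.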
// CUTLASS
typedef gemm::blas_scaled_epilogue<float, float, float> epilogue_op_t;
epilogue_op_t epilogue(alpha, beta);
for (int i = 0; i < g_timing_iterations+2; i++) {
if (i == 2) timer.start();
gemm::dispatch<epilogue_op_t>(
m,
n,
k,
alpha,
beta,
A,
B,
C2,
stream,
false);
}
timer.stop();
// calculate CUTLASS time
double tcutlass = timer.elapsed_millis() / g_timing_iterations;
double cutlass_flops = double(num_flops) / tcutlass / 1.0e6;
// error performance summary. No need to optimize below this line
printf("CUBLAS: %.2f Gflops, CUTLASS: %.2f Gflops\n", cublas_flops, cutlass_flops);
hipDeviceSynchronize();
double err = 0;
for (int i=0; i<n; i++) {
for (int j=0; j<m; j++) {
// err += fabs(C.get(i,j) - C2.get(i,j));
err += fabs(C[i*m + j] - C2[i*m + j]);
}
}
printf("error: %lf\n", err/n/m);
hipblasDestroy(g_cublas_handle);
}
| 73cf4dd7801c15e2dfc93fd845c5312dea5b33dd.cu | #include <iostream>
#include <typeinfo>
#include <random>
#include <stdint.h>
#include <cublas_v2.h>
#define DEBUG
#include <gemm/dispatch.h>
#include <gemm/epilogue_function.h>
//#include "util/matrix.h"
#include "util/timer.h"
using namespace cutlass;
// for Windows testing
float drand48() {
return rand() / (RAND_MAX + 1.0);
}
int main(int argc, const char **argv) {
// consts
int m = 10240;
int k = 4096;
int n = 4096;
float alpha = 1.0;
float beta = 0.0;
int g_timing_iterations = 10;
static const int TransformA = 0;
static const int TransformB = 0;
cudaStream_t stream = 0;
// definitions
float *A, *B, *C, *C2;
cudaMallocManaged(&A, m*k * sizeof(float) );
cudaMallocManaged(&B, k*n * sizeof(float) );
cudaMallocManaged(&C, m*n * sizeof(float) );
cudaMallocManaged(&C2, m*n * sizeof(float) );
// fill out
for( int jndex=0; jndex<k; jndex++ ) {
for(int index=0; index<m; index++) {
A[jndex*m + index] = drand48();
}
}
for( int jndex=0; jndex<n; jndex++ ) {
for(int index=0; index<k; index++) {
B[jndex*k + index] = drand48();
}
}
for( int jndex=0; jndex<n; jndex++ ) {
for(int index=0; index<m; index++) {
C[jndex*m + index] = 0;
C2[jndex*m + index] = 0;
}
}
cudaDeviceSynchronize();
// CUBLAS
cublasHandle_t g_cublas_handle;
cublasCreate(&g_cublas_handle);
gpu_timer timer;
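  // Iterations 0 and 1 are warm-up; the timer starts at i == 2, so exactly
  // g_timing_iterations launches are measured.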
for (int i = 0; i < g_timing_iterations+2; i++) {
if (i == 2) timer.start();
CUDA_PERROR(cublasSgemm(
g_cublas_handle,
(cublasOperation_t) TransformA,
(cublasOperation_t) TransformB,
m,
n,
k,
&alpha,
A,
m,
B,
k,
&beta,
C,
m));
}
timer.stop();
// calculate CUBLAS time
int64_t num_flops = (2 * int64_t(m) * int64_t(n) * int64_t(k)) + (2 * int64_t(m) * int64_t(n));
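  // 2*m*n*k flops for the multiply-accumulates plus 2*m*n for the alpha/beta epilogue;
  // dividing by the per-iteration time in ms and by 1.0e6 below yields GFLOP/s.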
double tcublas = timer.elapsed_millis() / g_timing_iterations;
double cublas_flops = double(num_flops) / tcublas / 1.0e6;
// CUTLASS
typedef gemm::blas_scaled_epilogue<float, float, float> epilogue_op_t;
epilogue_op_t epilogue(alpha, beta);
for (int i = 0; i < g_timing_iterations+2; i++) {
if (i == 2) timer.start();
gemm::dispatch<epilogue_op_t>(
m,
n,
k,
alpha,
beta,
A,
B,
C2,
stream,
false);
}
timer.stop();
// calculate CUTLASS time
double tcutlass = timer.elapsed_millis() / g_timing_iterations;
double cutlass_flops = double(num_flops) / tcutlass / 1.0e6;
// error performance summary. No need to optimize below this line
printf("CUBLAS: %.2f Gflops, CUTLASS: %.2f Gflops\n", cublas_flops, cutlass_flops);
cudaDeviceSynchronize();
double err = 0;
for (int i=0; i<n; i++) {
for (int j=0; j<m; j++) {
// err += fabs(C.get(i,j) - C2.get(i,j));
err += fabs(C[i*m + j] - C2[i*m + j]);
}
}
printf("error: %lf\n", err/n/m);
cublasDestroy(g_cublas_handle);
}
|
9ea27772cc4b74af1ef1010091050ae08c109414.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "sobel_kernel.hip"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define TILE_WIDTH 14
#define BLOCK_WIDTH 16
void SobelOnDevice(int* result, unsigned int* pic, int width, int height, int thresh);
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// really read
char duh[80];
char *line = chars;
// find the start of the pixel data.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++)
pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
int x,y;
fp = fopen(filename, "wb");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
//check
bool check(int* des, int* src, int len){
for(int i=0; i<len; ++i){
if(des[i] != src[i]){
printf("at %d des = %d, src = %d", i, des[i], src[i]);
return false;
}
}
return true;
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) { // filename AND threshold
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) { // default file but specified threshhold
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 3 * sizeof( int );
int *result = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
int i, j, magnitude, sum1, sum2;
// Zero the gold output by index so that `result` keeps pointing at the start of the
// buffer used by the Sobel loop, check() and write_ppm() below.
for (int col=0; col<xsize; col++) {
  for (int row=0; row<ysize; row++) {
    result[row*xsize + col] = 0;
  }
}
for (i = 1; i < ysize - 1; i++) {
for (j = 1; j < xsize -1; j++) {
int offset = i*xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[offset] = 255;
else
result[offset] = 0;
}
}
write_ppm( "result8000gold.ppm", xsize, ysize, 255, result);
// GPU Version
int *resultGPU = (int *) malloc( numbytes );
if (!resultGPU) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
SobelOnDevice(resultGPU, pic, xsize, ysize, thresh);
//check for test
printf("xsize is %d, and ysize is %d: \n ", xsize, ysize);
printf( "Test %s\n", (check(resultGPU, result, xsize * ysize)) ? "PASSED" : "FAILED");
write_ppm( "result9000gold.ppm", xsize, ysize, 255, resultGPU);
fprintf(stderr, "sobel done\n");
}
////////////////////////////////////////////////////////////////////////////////
//! Sobel On CUDA
////////////////////////////////////////////////////////////////////////////////
void SobelOnDevice(int* result, unsigned int* pic, int width, int height, int thresh)
{
// Device input vectors
unsigned int *d_pic;
size_t bytes = width * height * sizeof( int );
// Allocate memory for each vector on GPU
hipMalloc(&d_pic, bytes);
// Copy host vectors to device
hipMemcpy(d_pic, pic, bytes, hipMemcpyHostToDevice);
// Allocate P on the device
int *d_res;
hipMalloc(&d_res, 3*bytes);
// Setup the execution configuration
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 dimGrid((width-1)/TILE_WIDTH + 1, (height-1)/TILE_WIDTH + 1, 1);
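  // Each BLOCK_WIDTH x BLOCK_WIDTH block covers a TILE_WIDTH x TILE_WIDTH output tile;
  // the extra ring of threads presumably loads the 1-pixel halo required by the 3x3
  // Sobel stencil (see SobelKernel for the actual tiling).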
// Launch the device computation threads!
hipLaunchKernelGGL(( SobelKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_res, d_pic, width, height, thresh);
// Copy array back to host
hipMemcpy(result, d_res, 3*bytes, hipMemcpyDeviceToHost );
// Free device matrices
hipFree(d_pic);
hipFree(d_res);
}
| 9ea27772cc4b74af1ef1010091050ae08c109414.cu |
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "sobel_kernel.cu"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define TILE_WIDTH 14
#define BLOCK_WIDTH 16
void SobelOnDevice(int* result, unsigned int* pic, int width, int height, int thresh);
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// really read
char duh[80];
char *line = chars;
// find the start of the pixel data.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++)
pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
int x,y;
fp = fopen(filename, "wb");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
//check
bool check(int* des, int* src, int len){
for(int i=0; i<len; ++i){
if(des[i] != src[i]){
printf("at %d des = %d, src = %d", i, des[i], src[i]);
return false;
}
}
return true;
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) { // filename AND threshold
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) { // default file but specified threshhold
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 3 * sizeof( int );
int *result = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
int i, j, magnitude, sum1, sum2;
// Zero the gold output by index so that `result` keeps pointing at the start of the
// buffer used by the Sobel loop, check() and write_ppm() below.
for (int col=0; col<xsize; col++) {
  for (int row=0; row<ysize; row++) {
    result[row*xsize + col] = 0;
  }
}
for (i = 1; i < ysize - 1; i++) {
for (j = 1; j < xsize -1; j++) {
int offset = i*xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[offset] = 255;
else
result[offset] = 0;
}
}
write_ppm( "result8000gold.ppm", xsize, ysize, 255, result);
// GPU Version
int *resultGPU = (int *) malloc( numbytes );
if (!resultGPU) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1); // fail
}
SobelOnDevice(resultGPU, pic, xsize, ysize, thresh);
//check for test
printf("xsize is %d, and ysize is %d: \n ", xsize, ysize);
printf( "Test %s\n", (check(resultGPU, result, xsize * ysize)) ? "PASSED" : "FAILED");
write_ppm( "result9000gold.ppm", xsize, ysize, 255, resultGPU);
fprintf(stderr, "sobel done\n");
}
////////////////////////////////////////////////////////////////////////////////
//! Sobel On CUDA
////////////////////////////////////////////////////////////////////////////////
void SobelOnDevice(int* result, unsigned int* pic, int width, int height, int thresh)
{
// Device input vectors
unsigned int *d_pic;
size_t bytes = width * height * sizeof( int );
// Allocate memory for each vector on GPU
cudaMalloc(&d_pic, bytes);
// Copy host vectors to device
cudaMemcpy(d_pic, pic, bytes, cudaMemcpyHostToDevice);
// Allocate P on the device
int *d_res;
cudaMalloc(&d_res, 3*bytes);
// Setup the execution configuration
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 dimGrid((width-1)/TILE_WIDTH + 1, (height-1)/TILE_WIDTH + 1, 1);
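  // Each BLOCK_WIDTH x BLOCK_WIDTH block covers a TILE_WIDTH x TILE_WIDTH output tile;
  // the extra ring of threads presumably loads the 1-pixel halo required by the 3x3
  // Sobel stencil (see SobelKernel for the actual tiling).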
// Launch the device computation threads!
SobelKernel<<<dimGrid, dimBlock>>>(d_res, d_pic, width, height, thresh);
// Copy array back to host
cudaMemcpy(result, d_res, 3*bytes, cudaMemcpyDeviceToHost );
// Free device matrices
cudaFree(d_pic);
cudaFree(d_res);
}
|
296f86ae112a4b0f7b73a43666556fe198ba3460.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaLaplacianImageFilter.hcu"
#include "rtkCudaFirstOrderKernels.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <hip/hip_runtime.h>
void
CUDA_laplacian(int size[3], float spacing[3], float * dev_in, float * dev_out)
{
int3 dev_Size = make_int3(size[0], size[1], size[2]);
float3 dev_Spacing = make_float3(spacing[0], spacing[1], spacing[2]);
// Reset output volume
long int memorySizeOutput = size[0] * size[1] * size[2] * sizeof(float);
hipMemset((void *)dev_out, 0, memorySizeOutput);
// Initialize volumes to store the gradient components
float * dev_grad_x;
float * dev_grad_y;
float * dev_grad_z;
hipMalloc((void **)&dev_grad_x, memorySizeOutput);
hipMalloc((void **)&dev_grad_y, memorySizeOutput);
hipMalloc((void **)&dev_grad_z, memorySizeOutput);
hipMemset(dev_grad_x, 0, memorySizeOutput);
hipMemset(dev_grad_y, 0, memorySizeOutput);
hipMemset(dev_grad_z, 0, memorySizeOutput);
// Thread Block Dimensions
dim3 dimBlock = dim3(16, 4, 4);
int blocksInX = iDivUp(size[0], dimBlock.x);
int blocksInY = iDivUp(size[1], dimBlock.y);
int blocksInZ = iDivUp(size[2], dimBlock.z);
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
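  // The Laplacian is formed as div(grad f): the gradient kernel fills the three
  // component volumes, then the divergence kernel combines them into dev_out.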
hipLaunchKernelGGL(( gradient_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_in, dev_grad_x, dev_grad_y, dev_grad_z, dev_Size, dev_Spacing);
CUDA_CHECK_ERROR;
hipLaunchKernelGGL(( divergence_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_grad_x, dev_grad_y, dev_grad_z, dev_out, dev_Size, dev_Spacing);
CUDA_CHECK_ERROR;
// Cleanup
hipFree(dev_grad_x);
hipFree(dev_grad_y);
hipFree(dev_grad_z);
CUDA_CHECK_ERROR;
}
| 296f86ae112a4b0f7b73a43666556fe198ba3460.cu | /*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaLaplacianImageFilter.hcu"
#include "rtkCudaFirstOrderKernels.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <cuda.h>
void
CUDA_laplacian(int size[3], float spacing[3], float * dev_in, float * dev_out)
{
int3 dev_Size = make_int3(size[0], size[1], size[2]);
float3 dev_Spacing = make_float3(spacing[0], spacing[1], spacing[2]);
// Reset output volume
long int memorySizeOutput = size[0] * size[1] * size[2] * sizeof(float);
cudaMemset((void *)dev_out, 0, memorySizeOutput);
// Initialize volumes to store the gradient components
float * dev_grad_x;
float * dev_grad_y;
float * dev_grad_z;
cudaMalloc((void **)&dev_grad_x, memorySizeOutput);
cudaMalloc((void **)&dev_grad_y, memorySizeOutput);
cudaMalloc((void **)&dev_grad_z, memorySizeOutput);
cudaMemset(dev_grad_x, 0, memorySizeOutput);
cudaMemset(dev_grad_y, 0, memorySizeOutput);
cudaMemset(dev_grad_z, 0, memorySizeOutput);
// Thread Block Dimensions
dim3 dimBlock = dim3(16, 4, 4);
int blocksInX = iDivUp(size[0], dimBlock.x);
int blocksInY = iDivUp(size[1], dimBlock.y);
int blocksInZ = iDivUp(size[2], dimBlock.z);
dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ);
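  // The Laplacian is formed as div(grad f): the gradient kernel fills the three
  // component volumes, then the divergence kernel combines them into dev_out.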
gradient_kernel<<<dimGrid, dimBlock>>>(dev_in, dev_grad_x, dev_grad_y, dev_grad_z, dev_Size, dev_Spacing);
CUDA_CHECK_ERROR;
divergence_kernel<<<dimGrid, dimBlock>>>(dev_grad_x, dev_grad_y, dev_grad_z, dev_out, dev_Size, dev_Spacing);
CUDA_CHECK_ERROR;
// Cleanup
cudaFree(dev_grad_x);
cudaFree(dev_grad_y);
cudaFree(dev_grad_z);
CUDA_CHECK_ERROR;
}
|
9ee3c02ca57a58fe6a4bd1ef0dbedb7395ab25cc.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2017 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "hip/hip_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
    temp_v[WIRE_SEGMENTS] =
      find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
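    // Each step updates segment currents from the voltage drop across the segment minus
    // the inductive term (scaled by 1/R), then updates interior voltages from the net
    // current into each segment capacitance.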
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( calc_new_currents_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
template<typename REDOP>
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, typename REDOP::RHS value)
{
switch (loc)
{
case PRIVATE_PTR:
pvt.template reduce<REDOP,true/*exclusive*/>(ptr, value);
break;
case SHARED_PTR:
shr.template reduce<REDOP,false/*exclusive*/>(ptr, value);
break;
case GHOST_PTR:
ghost.template reduce<REDOP,false/*exclusive*/>(ptr, value);
break;
default:
break; // assert(false); // should never make it here
}
}
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local<GPUAccumulateCharge>(fa_pvt_charge, fa_shr_charge, fa_ghost_charge,
in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local<GPUAccumulateCharge>(fa_pvt_charge, fa_shr_charge, fa_ghost_charge,
out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( distribute_charge_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
      fa_shr_voltage[node_ptr] = voltage;
      fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( update_voltages_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
| 9ee3c02ca57a58fe6a4bd1ef0dbedb7395ab25cc.cu | /* Copyright 2017 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "cuda_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
    temp_v[WIRE_SEGMENTS] =
      find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
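    // Each step updates segment currents from the voltage drop across the segment minus
    // the inductive term (scaled by 1/R), then updates interior voltages from the net
    // current into each segment capacitance.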
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
calc_new_currents_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
template<typename REDOP>
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, typename REDOP::RHS value)
{
switch (loc)
{
case PRIVATE_PTR:
pvt.template reduce<REDOP,true/*exclusive*/>(ptr, value);
break;
case SHARED_PTR:
shr.template reduce<REDOP,false/*exclusive*/>(ptr, value);
break;
case GHOST_PTR:
ghost.template reduce<REDOP,false/*exclusive*/>(ptr, value);
break;
default:
break; // assert(false); // should never make it here
}
}
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local<GPUAccumulateCharge>(fa_pvt_charge, fa_shr_charge, fa_ghost_charge,
in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local<GPUAccumulateCharge>(fa_pvt_charge, fa_shr_charge, fa_ghost_charge,
out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
distribute_charge_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
      fa_shr_voltage[node_ptr] = voltage;
      fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
update_voltages_kernel<<<num_blocks,threads_per_block>>>(piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
|
pinhole_camera.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_helpers_hip.cuh"
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(uint, volume_width, ,);
rtDeclareVariable(uint, volume_height, ,);
rtDeclareVariable(uint, volume_depth, ,);
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable(rtObject, top_object, ,);
rtBuffer<uchar4, 2> output_buffer;
RT_PROGRAM void pinhole_camera() {
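  // Map the launch index to normalized device coordinates in [-1, 1] and shoot a primary
  // ray through the camera basis (U and V span the image plane, W points from the eye toward it).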
float2 d = make_float2(launch_index) / make_float2(launch_dim) * 2.f - 1.f;
float3 ray_origin = eye;
float3 ray_direction = optix::normalize(d.x*U + d.y*V + W);
/*#ifdef DEBUG
float t = tex3D(
volume_texture,
launch_index.x/(float) volume_width,
launch_index.y/(float) volume_height,
(frame_idx % volume_depth)/(float) volume_depth
);
if (launch_index.x == 128 && launch_index.y == 128) rtPrintf("Setting t to %f for %d %d %d \n", t, launch_index.x, launch_index.y, frame_idx);
#else
float t = tex3D(volume_texture, launch_index.x, launch_index.y, 0);
if (launch_index.x == 64 && launch_index.y == 64) rtPrintf("Setting t to %f for %d %d %d\n", t, launch_index.x, launch_index.y, frame_idx);
#endif
*/
optix::Ray ray = optix::make_Ray(ray_origin, ray_direction, 0, 0.f, RT_DEFAULT_MAX);
PerRayData_radiance prd;
prd.r = prd.g = prd.b = prd.alpha = 0;
rtTrace(top_object, ray, prd);
output_buffer[launch_index] = make_colour(
optix::make_float3(prd.r, prd.g, prd.b)
// optix::make_float3(t/255.0f, t/255.0f, t/255.0f)
// ray_direction*2
// transferred
);
}
| pinhole_camera.cu | #include "device_helpers.cuh"
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(uint, volume_width, ,);
rtDeclareVariable(uint, volume_height, ,);
rtDeclareVariable(uint, volume_depth, ,);
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable(uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable(rtObject, top_object, ,);
rtBuffer<uchar4, 2> output_buffer;
RT_PROGRAM void pinhole_camera() {
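  // Map the launch index to normalized device coordinates in [-1, 1] and shoot a primary
  // ray through the camera basis (U and V span the image plane, W points from the eye toward it).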
float2 d = make_float2(launch_index) / make_float2(launch_dim) * 2.f - 1.f;
float3 ray_origin = eye;
float3 ray_direction = optix::normalize(d.x*U + d.y*V + W);
/*#ifdef DEBUG
float t = tex3D(
volume_texture,
launch_index.x/(float) volume_width,
launch_index.y/(float) volume_height,
(frame_idx % volume_depth)/(float) volume_depth
);
if (launch_index.x == 128 && launch_index.y == 128) rtPrintf("Setting t to %f for %d %d %d \n", t, launch_index.x, launch_index.y, frame_idx);
#else
float t = tex3D(volume_texture, launch_index.x, launch_index.y, 0);
if (launch_index.x == 64 && launch_index.y == 64) rtPrintf("Setting t to %f for %d %d %d\n", t, launch_index.x, launch_index.y, frame_idx);
#endif
*/
optix::Ray ray = optix::make_Ray(ray_origin, ray_direction, 0, 0.f, RT_DEFAULT_MAX);
PerRayData_radiance prd;
prd.r = prd.g = prd.b = prd.alpha = 0;
rtTrace(top_object, ray, prd);
output_buffer[launch_index] = make_colour(
optix::make_float3(prd.r, prd.g, prd.b)
// optix::make_float3(t/255.0f, t/255.0f, t/255.0f)
// ray_direction*2
// transferred
);
}
|
c5b7a2dcc8d716bd1fe9681879a5e50683203261.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 512
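// getMaxValueOfRow: one block per row; each block copies its row into shared memory and
// performs a tree reduction (stride doubling each step), leaving the row maximum in ds_arr[0].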
__global__ void getMaxValueOfRow(float *d_arr, float *maxArray) {
unsigned int t = threadIdx.x;
unsigned int bid = blockIdx.x;
__shared__ float ds_arr[N];
ds_arr[t] = d_arr[t + bid * N];
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if (t % (2 * stride) == 0)
ds_arr[t] = ds_arr[t + stride] > ds_arr[t] ? ds_arr[t + stride] : ds_arr[t];
}
    // After the tree reduction the row maximum is in ds_arr[0]; let one thread publish it.
    if (t == 0)
        maxArray[bid] = ds_arr[0];
}
int main() {
float *h_arr, *d_arr, *h_maxArray, *d_maxArray;
int total = N * N;
int mem_size = total * sizeof(float);
h_arr = (float *) malloc(mem_size);
h_maxArray = (float *) malloc(N * sizeof(float));
for (int i = 0; i < total; i++) {
h_arr[i] = 3.0;
}
for (int i = 0; i < N; i++) {
h_maxArray[i] = 0.0;
}
hipMalloc((void **) &d_arr, mem_size);
hipMalloc((void **) &d_maxArray, N * sizeof(float));
hipMemcpy(d_arr, h_arr, mem_size, hipMemcpyHostToDevice);
hipMemcpy(d_maxArray, h_maxArray, N * sizeof(float), hipMemcpyHostToDevice);
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(N);
// Record the program start time
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( getMaxValueOfRow) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_arr, d_maxArray);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Time elapsed: %.6f ms\n", time);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(h_maxArray, d_maxArray, N * sizeof(float), hipMemcpyDeviceToHost);
// for (int i = 0; i < N; ++i) {
// printf("The max number of row %d :%.f\n", i, h_maxArray[i]);
// }
// Verify the results
int count = 0;
for (int i = 0; i < N; ++i) {
if (h_maxArray[i] == 3)
count++;
}
printf("count = %d\n", count);
hipFree(d_arr);
hipFree(d_maxArray);
free(h_arr);
free(h_maxArray);
return 0;
} | c5b7a2dcc8d716bd1fe9681879a5e50683203261.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 512
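// getMaxValueOfRow: one block per row; each block copies its row into shared memory and
// performs a tree reduction (stride doubling each step), leaving the row maximum in ds_arr[0].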
__global__ void getMaxValueOfRow(float *d_arr, float *maxArray) {
unsigned int t = threadIdx.x;
unsigned int bid = blockIdx.x;
__shared__ float ds_arr[N];
ds_arr[t] = d_arr[t + bid * N];
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if (t % (2 * stride) == 0)
ds_arr[t] = ds_arr[t + stride] > ds_arr[t] ? ds_arr[t + stride] : ds_arr[t];
}
    // After the tree reduction the row maximum is in ds_arr[0]; let one thread publish it.
    if (t == 0)
        maxArray[bid] = ds_arr[0];
}
int main() {
float *h_arr, *d_arr, *h_maxArray, *d_maxArray;
int total = N * N;
int mem_size = total * sizeof(float);
h_arr = (float *) malloc(mem_size);
h_maxArray = (float *) malloc(N * sizeof(float));
for (int i = 0; i < total; i++) {
h_arr[i] = 3.0;
}
for (int i = 0; i < N; i++) {
h_maxArray[i] = 0.0;
}
cudaMalloc((void **) &d_arr, mem_size);
cudaMalloc((void **) &d_maxArray, N * sizeof(float));
cudaMemcpy(d_arr, h_arr, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_maxArray, h_maxArray, N * sizeof(float), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(N);
// Record the program start time
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
getMaxValueOfRow <<< blocksPerGrid, threadsPerBlock >>> (d_arr, d_maxArray);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Time elapsed: %.6f ms\n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(h_maxArray, d_maxArray, N * sizeof(float), cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; ++i) {
// printf("The max number of row %d :%.f\n", i, h_maxArray[i]);
// }
// Verify the results
int count = 0;
for (int i = 0; i < N; ++i) {
if (h_maxArray[i] == 3)
count++;
}
printf("count = %d\n", count);
cudaFree(d_arr);
cudaFree(d_maxArray);
free(h_arr);
free(h_maxArray);
return 0;
} |
d635b1f31dc0e9c9ee15a696d367963b5180d66c.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdlib.h>
// CUDA runtime
#include "helper.h"
#include <hipblas.h>
#include <hip/hip_runtime.h>
/**
 * naive implementation
*/
template <int BLOCK>
__global__ void sgemm(int m, int n, int k, float *a, int lda, float *b, int ldb,
float *c, int ldc) {
int _m = blockIdx.x * BLOCK + threadIdx.x;
int _n = blockIdx.y * BLOCK + threadIdx.y;
if (_m < m and _n < n) {
float sum = 0.f;
for (int i = 0; i < k; ++i) {
sum += a[_m * k + i] * b[i * n + _n];
}
c[_m * n + _n] = sum;
}
}
void MY_MMult(hipblasHandle_t handle, int m, int n, int k, float *d_A, int lda,
float *d_B, int ldb, float *d_C, int ldc) {
constexpr int BLOCK = 16;
// subm, subn, subk
dim3 block(BLOCK, BLOCK);
dim3 grid((m + BLOCK - 1) / BLOCK, (n + BLOCK - 1) / BLOCK);
hipLaunchKernelGGL(( sgemm<BLOCK>), dim3(grid), dim3(block), 0, 0, m, n, k, d_A, lda, d_B, ldb, d_C, ldc);
}
| d635b1f31dc0e9c9ee15a696d367963b5180d66c.cu | #include <assert.h>
#include <stdlib.h>
// CUDA runtime
#include "helper.h"
#include <cublas_v2.h>
#include <cuda_runtime.h>
/**
 * naive implementation
*/
template <int BLOCK>
__global__ void sgemm(int m, int n, int k, float *a, int lda, float *b, int ldb,
float *c, int ldc) {
int _m = blockIdx.x * BLOCK + threadIdx.x;
int _n = blockIdx.y * BLOCK + threadIdx.y;
if (_m < m and _n < n) {
float sum = 0.f;
for (int i = 0; i < k; ++i) {
sum += a[_m * k + i] * b[i * n + _n];
}
c[_m * n + _n] = sum;
}
}
void MY_MMult(cublasHandle_t handle, int m, int n, int k, float *d_A, int lda,
float *d_B, int ldb, float *d_C, int ldc) {
constexpr int BLOCK = 16;
// subm, subn, subk
dim3 block(BLOCK, BLOCK);
dim3 grid((m + BLOCK - 1) / BLOCK, (n + BLOCK - 1) / BLOCK);
sgemm<BLOCK><<<grid, block>>>(m, n, k, d_A, lda, d_B, ldb, d_C, ldc);
}
|
1a3d7c152decafd02bd2429dd7b56ff7a3260766.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgedensereimsplit.cu, normal z -> s, Tue Aug 30 09:38:46 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
float * A,
float * ReA,
float * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_S_MAKE( MAGMA_S_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_S_MAKE( MAGMA_S_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A.
@param[out]
ReA magma_s_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_s_matrix*
output matrix contaning real contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgedensereimsplit(
magma_s_matrix A,
magma_s_matrix *ReA,
magma_s_matrix *ImA,
magma_queue_t queue )
{
magma_smtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_smtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
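/* Usage sketch (illustrative only; assumes A already holds a dense matrix on the device
   and queue is a valid magma_queue_t -- the output matrices are allocated by the routine):
     magma_s_matrix ReA, ImA;
     magma_sgedensereimsplit( A, &ReA, &ImA, queue );
*/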
| 1a3d7c152decafd02bd2429dd7b56ff7a3260766.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgedensereimsplit.cu, normal z -> s, Tue Aug 30 09:38:46 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
float * A,
float * ReA,
float * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_S_MAKE( MAGMA_S_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_S_MAKE( MAGMA_S_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A.
@param[out]
ReA magma_s_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_s_matrix*
output matrix contaning real contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C"
magma_int_t
magma_sgedensereimsplit(
magma_s_matrix A,
magma_s_matrix *ReA,
magma_s_matrix *ImA,
magma_queue_t queue )
{
magma_smtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_smtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
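/* Usage sketch (illustrative only; assumes A already holds a dense matrix on the device
   and queue is a valid magma_queue_t -- the output matrices are allocated by the routine):
     magma_s_matrix ReA, ImA;
     magma_sgedensereimsplit( A, &ReA, &ImA, queue );
*/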
|
7e9c8d84853fce9a7f63d2e418439e245e8175ef.hip | // !!! This is a file automatically generated by hipify!!!
/** \file "advectshift.cu" : implements the kernel for the "advectshift" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_ADVECTSHIFT
#define BLOCK_X 16
// BLOCK_Y : in radius
#define BLOCK_Y 2
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
//PolarGrid *WorkShift;
__device__ int Shift[32768];
__global__ void kernel_advsh (double *num,
double *work,
int ns, int pitch) {
__shared__ double buffer[(6*BLOCK_X)*BLOCK_Y];
int jg = threadIdx.x + blockIdx.x * blockDim.x * 4;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x;
int is = threadIdx.y;
int jgm, jgp;
int ids = js+is*6*blockDim.x;
int nshift = Shift[ig];
buffer[ids + blockDim.x] = GET_TAB (num, jg, ig, pitch);
buffer[ids + 2*blockDim.x] = GET_TAB (num, jg+blockDim.x, ig, pitch);
buffer[ids + 3*blockDim.x] = GET_TAB (num, jg+blockDim.x*2, ig, pitch);
buffer[ids + 4*blockDim.x] = GET_TAB (num, jg+blockDim.x*3, ig, pitch);
if (nshift > 0) {
jgm = jg - blockDim.x;
if (jgm < 0) jgm += ns;
buffer[ids] = GET_TAB (num, jgm, ig, pitch);
}
if (nshift < 0) {
jgp = jg + 4*blockDim.x;
if (jgp >= ns) jgp -= ns;
buffer[ids + 5*blockDim.x] = GET_TAB (num, jgp, ig, pitch);
}
__syncthreads ();
GET_TAB (work, jg, ig, pitch) = buffer[ids + blockDim.x - nshift];
GET_TAB (work, jg+blockDim.x, ig, pitch) = buffer[ids + 2*blockDim.x - nshift];
GET_TAB (work, jg+2*blockDim.x, ig, pitch) = buffer[ids + 3*blockDim.x - nshift];
GET_TAB (work, jg+3*blockDim.x, ig, pitch) = buffer[ids + 4*blockDim.x - nshift];
}
extern "C"
void AdvectSHIFT_gpu (PolarGrid *array, int *Nshift) {
//static int FirstTime = YES;
int nr, ns;
double *temp_gpu_ptr;
nr = array->Nrad;
ns = array->Nsec;
//if (FirstTime) {
// WorkShift = CreatePolarGrid (nr, ns, "WorkShift");
// FirstTime = NO;
//}
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x/4, (nr+block.y-1)/block.y);
checkCudaErrors(hipMemcpyToSymbol(Shift, (void *)Nshift, (size_t)(nr*sizeof(int)), 0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_advsh) , dim3(grid), dim3(block) , 0, 0, array->gpu_field,
WorkShift->gpu_field,
ns,
array->pitch/sizeof(double));
hipDeviceSynchronize();
getLastCudaError("Kernel advsh execution failed");
/* Swap gpu arrays to avoid memcpy on device (would halve the performance) */
temp_gpu_ptr = array->gpu_field;
array->gpu_field = WorkShift->gpu_field;
WorkShift->gpu_field = temp_gpu_ptr;
}
| 7e9c8d84853fce9a7f63d2e418439e245e8175ef.cu | /** \file "advectshift.cu" : implements the kernel for the "advectshift" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_ADVECTSHIFT
#define BLOCK_X 16
// BLOCK_Y : in radius
#define BLOCK_Y 2
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
//PolarGrid *WorkShift;
__device__ int Shift[32768];
__global__ void kernel_advsh (double *num,
double *work,
int ns, int pitch) {
__shared__ double buffer[(6*BLOCK_X)*BLOCK_Y];
int jg = threadIdx.x + blockIdx.x * blockDim.x * 4;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int js = threadIdx.x;
int is = threadIdx.y;
int jgm, jgp;
int ids = js+is*6*blockDim.x;
int nshift = Shift[ig];
buffer[ids + blockDim.x] = GET_TAB (num, jg, ig, pitch);
buffer[ids + 2*blockDim.x] = GET_TAB (num, jg+blockDim.x, ig, pitch);
buffer[ids + 3*blockDim.x] = GET_TAB (num, jg+blockDim.x*2, ig, pitch);
buffer[ids + 4*blockDim.x] = GET_TAB (num, jg+blockDim.x*3, ig, pitch);
if (nshift > 0) {
jgm = jg - blockDim.x;
if (jgm < 0) jgm += ns;
buffer[ids] = GET_TAB (num, jgm, ig, pitch);
}
if (nshift < 0) {
jgp = jg + 4*blockDim.x;
if (jgp >= ns) jgp -= ns;
buffer[ids + 5*blockDim.x] = GET_TAB (num, jgp, ig, pitch);
}
__syncthreads ();
GET_TAB (work, jg, ig, pitch) = buffer[ids + blockDim.x - nshift];
GET_TAB (work, jg+blockDim.x, ig, pitch) = buffer[ids + 2*blockDim.x - nshift];
GET_TAB (work, jg+2*blockDim.x, ig, pitch) = buffer[ids + 3*blockDim.x - nshift];
GET_TAB (work, jg+3*blockDim.x, ig, pitch) = buffer[ids + 4*blockDim.x - nshift];
}
extern "C"
void AdvectSHIFT_gpu (PolarGrid *array, int *Nshift) {
//static int FirstTime = YES;
int nr, ns;
double *temp_gpu_ptr;
nr = array->Nrad;
ns = array->Nsec;
//if (FirstTime) {
// WorkShift = CreatePolarGrid (nr, ns, "WorkShift");
// FirstTime = NO;
//}
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x/4, (nr+block.y-1)/block.y);
checkCudaErrors(cudaMemcpyToSymbol(Shift, (void *)Nshift, (size_t)(nr*sizeof(int)), 0, cudaMemcpyHostToDevice));
kernel_advsh <<< grid, block >>> (array->gpu_field,
WorkShift->gpu_field,
ns,
array->pitch/sizeof(double));
cudaThreadSynchronize();
getLastCudaError("Kernel advsh execution failed");
/* Swap gpu arrays to avoid memcpy on device (would halve the performance) */
temp_gpu_ptr = array->gpu_field;
array->gpu_field = WorkShift->gpu_field;
WorkShift->gpu_field = temp_gpu_ptr;
}
|
79232e6435a7ded9607e2d7f571fe68db3ce5e12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cuSearchDoublet(const int* nSpM, const float* spMmat, const int* nSpB, const float* spBmat, const int* nSpT, const float* spTmat, const float* deltaRMin, const float* deltaRMax, const float* cotThetaMax, const float* collisionRegionMin, const float* collisionRegionMax, int* nSpMcomp, int* nSpBcompPerSpM_Max, int* nSpTcompPerSpM_Max, int* nSpBcompPerSpM, int* nSpTcompPerSpM, int* McompIndex, int* BcompIndex, int* tmpBcompIndex, int* TcompIndex, int* tmpTcompIndex ){
extern __shared__ float sharedMem[];
int* mPos = (int*)sharedMem;
int* isMcompat = (int*)&mPos[1];
if (threadIdx.x==0) {
*isMcompat = false;
}
__syncthreads();
float rM = spMmat[blockIdx.x +(*nSpM)*3];
float zM = spMmat[blockIdx.x +(*nSpM)*2];
bool isBcompat(true);
bool isTcompat(true);
int offset(0);
while (offset < max(*nSpB, *nSpT) ){
isBcompat = true;
// Doublet search for bottom hits
if (threadIdx.x+offset < *nSpB ){
float rB = spBmat[threadIdx.x+offset+(*nSpB)*3];
float zB = spBmat[threadIdx.x+offset+(*nSpB)*2];
float deltaR = rM - rB;
if (deltaR > *deltaRMax){
isBcompat = false;
}
if (deltaR < *deltaRMin){
isBcompat = false;
}
float cotTheta = (zM - zB)/deltaR;
if (fabsf(cotTheta) > *cotThetaMax){
isBcompat = false;
}
float zOrigin = zM - rM*cotTheta;
if (zOrigin < *collisionRegionMin || zOrigin > *collisionRegionMax){
isBcompat = false;
}
if ( isBcompat == true ){
int bPos = atomicAdd(&nSpBcompPerSpM[blockIdx.x], 1);
tmpBcompIndex[bPos+(*nSpB)*blockIdx.x]=threadIdx.x+offset;
}
}
isTcompat = true;
// Doublet search for top hits
if (threadIdx.x+offset < *nSpT){
float rT = spTmat[threadIdx.x+offset+(*nSpT)*3];
float zT = spTmat[threadIdx.x+offset+(*nSpT)*2];
float deltaR = rT - rM;
if (deltaR < *deltaRMin){
isTcompat = false;
}
if (deltaR > *deltaRMax){
isTcompat = false;
}
if (isTcompat == true){
float cotTheta = (zT - zM)/deltaR;
if (fabsf(cotTheta) > *cotThetaMax){
isTcompat = false;
}
float zOrigin = zM - rM*cotTheta;
if (zOrigin < *collisionRegionMin || zOrigin > *collisionRegionMax){
isTcompat = false;
}
}
if ( isTcompat == true ){
int tPos = atomicAdd(&nSpTcompPerSpM[blockIdx.x], 1);
tmpTcompIndex[tPos+(*nSpT)*blockIdx.x]=threadIdx.x+offset;
}
}
offset += blockDim.x;
}
__syncthreads();
if (threadIdx.x == 0){
if (nSpBcompPerSpM[blockIdx.x] > 0 && nSpTcompPerSpM[blockIdx.x] > 0 ){
*mPos = atomicAdd(nSpMcomp,1);
*isMcompat = true;
McompIndex[*mPos] = blockIdx.x;
int bMax = atomicMax(nSpBcompPerSpM_Max,nSpBcompPerSpM[blockIdx.x]);
int tMax = atomicMax(nSpTcompPerSpM_Max,nSpTcompPerSpM[blockIdx.x]);
}
}
__syncthreads();
if (*isMcompat == true){
offset = 0;
while(offset< max(nSpBcompPerSpM[blockIdx.x], nSpTcompPerSpM[blockIdx.x] ) ){
if (threadIdx.x+offset < nSpBcompPerSpM[blockIdx.x]){
BcompIndex[threadIdx.x+offset+(*nSpB)*(*mPos)]
= tmpBcompIndex[threadIdx.x+offset+(*nSpB)*blockIdx.x];
}
if (threadIdx.x+offset < nSpTcompPerSpM[blockIdx.x]){
TcompIndex[threadIdx.x+offset+(*nSpT)*(*mPos)]
= tmpTcompIndex[threadIdx.x+offset+(*nSpT)*blockIdx.x];
}
offset += blockDim.x;
}
}
} | 79232e6435a7ded9607e2d7f571fe68db3ce5e12.cu | #include "includes.h"
__global__ void cuSearchDoublet(const int* nSpM, const float* spMmat, const int* nSpB, const float* spBmat, const int* nSpT, const float* spTmat, const float* deltaRMin, const float* deltaRMax, const float* cotThetaMax, const float* collisionRegionMin, const float* collisionRegionMax, int* nSpMcomp, int* nSpBcompPerSpM_Max, int* nSpTcompPerSpM_Max, int* nSpBcompPerSpM, int* nSpTcompPerSpM, int* McompIndex, int* BcompIndex, int* tmpBcompIndex, int* TcompIndex, int* tmpTcompIndex ){
extern __shared__ float sharedMem[];
int* mPos = (int*)sharedMem;
int* isMcompat = (int*)&mPos[1];
if (threadIdx.x==0) {
*isMcompat = false;
}
__syncthreads();
float rM = spMmat[blockIdx.x +(*nSpM)*3];
float zM = spMmat[blockIdx.x +(*nSpM)*2];
bool isBcompat(true);
bool isTcompat(true);
int offset(0);
while (offset < max(*nSpB, *nSpT) ){
isBcompat = true;
// Doublet search for bottom hits
if (threadIdx.x+offset < *nSpB ){
float rB = spBmat[threadIdx.x+offset+(*nSpB)*3];
float zB = spBmat[threadIdx.x+offset+(*nSpB)*2];
float deltaR = rM - rB;
if (deltaR > *deltaRMax){
isBcompat = false;
}
if (deltaR < *deltaRMin){
isBcompat = false;
}
float cotTheta = (zM - zB)/deltaR;
if (fabsf(cotTheta) > *cotThetaMax){
isBcompat = false;
}
float zOrigin = zM - rM*cotTheta;
if (zOrigin < *collisionRegionMin || zOrigin > *collisionRegionMax){
isBcompat = false;
}
if ( isBcompat == true ){
int bPos = atomicAdd(&nSpBcompPerSpM[blockIdx.x], 1);
tmpBcompIndex[bPos+(*nSpB)*blockIdx.x]=threadIdx.x+offset;
}
}
isTcompat = true;
// Doublet search for top hits
if (threadIdx.x+offset < *nSpT){
float rT = spTmat[threadIdx.x+offset+(*nSpT)*3];
float zT = spTmat[threadIdx.x+offset+(*nSpT)*2];
float deltaR = rT - rM;
if (deltaR < *deltaRMin){
isTcompat = false;
}
if (deltaR > *deltaRMax){
isTcompat = false;
}
if (isTcompat == true){
float cotTheta = (zT - zM)/deltaR;
if (fabsf(cotTheta) > *cotThetaMax){
isTcompat = false;
}
float zOrigin = zM - rM*cotTheta;
if (zOrigin < *collisionRegionMin || zOrigin > *collisionRegionMax){
isTcompat = false;
}
}
if ( isTcompat == true ){
int tPos = atomicAdd(&nSpTcompPerSpM[blockIdx.x], 1);
tmpTcompIndex[tPos+(*nSpT)*blockIdx.x]=threadIdx.x+offset;
}
}
offset += blockDim.x;
}
__syncthreads();
if (threadIdx.x == 0){
if (nSpBcompPerSpM[blockIdx.x] > 0 && nSpTcompPerSpM[blockIdx.x] > 0 ){
*mPos = atomicAdd(nSpMcomp,1);
*isMcompat = true;
McompIndex[*mPos] = blockIdx.x;
int bMax = atomicMax(nSpBcompPerSpM_Max,nSpBcompPerSpM[blockIdx.x]);
int tMax = atomicMax(nSpTcompPerSpM_Max,nSpTcompPerSpM[blockIdx.x]);
}
}
__syncthreads();
if (*isMcompat == true){
offset = 0;
while(offset< max(nSpBcompPerSpM[blockIdx.x], nSpTcompPerSpM[blockIdx.x] ) ){
if (threadIdx.x+offset < nSpBcompPerSpM[blockIdx.x]){
BcompIndex[threadIdx.x+offset+(*nSpB)*(*mPos)]
= tmpBcompIndex[threadIdx.x+offset+(*nSpB)*blockIdx.x];
}
if (threadIdx.x+offset < nSpTcompPerSpM[blockIdx.x]){
TcompIndex[threadIdx.x+offset+(*nSpT)*(*mPos)]
= tmpTcompIndex[threadIdx.x+offset+(*nSpT)*blockIdx.x];
}
offset += blockDim.x;
}
}
} |